// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */
#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/blk-cgroup.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include <linux/magic.h>
#include <linux/iversion.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/sched/mm.h>
#include <linux/iomap.h>
#include <linux/unaligned.h>
#include <linux/fsverity.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "ordered-data.h"
#include "compression.h"
#include "delalloc-space.h"
#include "block-group.h"
#include "space-info.h"
#include "inode-item.h"
#include "accessors.h"
#include "extent-tree.h"
#include "root-tree.h"
#include "file-item.h"
#include "uuid-tree.h"
#include "relocation.h"
#include "raid-stripe-tree.h"
struct btrfs_iget_args {
	u64 ino;
	struct btrfs_root *root;
};
struct btrfs_rename_ctx {
	/* Output field. Stores the index number of the old directory entry. */
	u64 index;
};
/*
 * Used by data_reloc_print_warning_inode() to pass needed info for filename
 * resolution and output of error message.
 */
struct data_reloc_warn {
	struct btrfs_path path;
	struct btrfs_fs_info *fs_info;
	u64 extent_item_size;
	u64 logical;
	int mirror_num;
};
/*
 * For the file_extent_tree, we want to hold the inode lock when we lookup and
 * update the disk_i_size, but lockdep will complain because elsewhere we take
 * the io_tree lock and then the inode lock when setting delalloc.  These two
 * things are unrelated, so make a class for the file_extent_tree so we don't
 * get the two locking patterns mixed up.
 */
static struct lock_class_key file_extent_tree_class;
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct file_operations btrfs_dir_file_operations;

static struct kmem_cache *btrfs_inode_cachep;
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback);

static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty);
static int data_reloc_print_warning_inode(u64 inum, u64 offset, u64 num_bytes,
					  u64 root, void *warn_ctx)
{
	struct data_reloc_warn *warn = warn_ctx;
	struct btrfs_fs_info *fs_info = warn->fs_info;
	struct extent_buffer *eb;
	struct btrfs_inode_item *inode_item;
	struct inode_fs_paths *ipath = NULL;
	struct btrfs_root *local_root;
	struct btrfs_key key;
	unsigned int nofs_flag;
	u32 nlink;
	int ret;

	local_root = btrfs_get_fs_root(fs_info, root, true);
	if (IS_ERR(local_root)) {
		ret = PTR_ERR(local_root);
		goto err;
	}

	/* This makes the path point to (inum INODE_ITEM ioff). */
	key.objectid = inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, local_root, &key, &warn->path, 0, 0);
	if (ret) {
		btrfs_put_root(local_root);
		btrfs_release_path(&warn->path);
		goto err;
	}

	eb = warn->path.nodes[0];
	inode_item = btrfs_item_ptr(eb, warn->path.slots[0], struct btrfs_inode_item);
	nlink = btrfs_inode_nlink(eb, inode_item);
	btrfs_release_path(&warn->path);

	nofs_flag = memalloc_nofs_save();
	ipath = init_ipath(4096, local_root, &warn->path);
	memalloc_nofs_restore(nofs_flag);
	if (IS_ERR(ipath)) {
		btrfs_put_root(local_root);
		ret = PTR_ERR(ipath);
		ipath = NULL;
		/*
		 * -ENOMEM, not a critical error, just output a generic error
		 * and continue.
		 */
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu, inode %llu offset %llu",
			      warn->logical, warn->mirror_num, root, inum, offset);
		goto err;
	}

	ret = paths_from_inode(inum, ipath);
	if (ret < 0)
		goto err;

	/*
	 * We deliberately ignore that ipath might have been too small to
	 * hold all of the paths here.
	 */
	for (int i = 0; i < ipath->fspath->elem_cnt; i++) {
		btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu length %u links %u (path: %s)",
			      warn->logical, warn->mirror_num, root, inum, offset,
			      fs_info->sectorsize, nlink,
			      (char *)(unsigned long)ipath->fspath->val[i]);
	}

	btrfs_put_root(local_root);
	free_ipath(ipath);
	return 0;

err:
	btrfs_warn_rl(fs_info,
"checksum error at logical %llu mirror %u root %llu inode %llu offset %llu, path resolving failed with ret=%d",
		      warn->logical, warn->mirror_num, root, inum, offset, ret);

	free_ipath(ipath);
	return ret;
}
/*
 * Do extra user-friendly error output (e.g. lookup all the affected files).
 *
 * Return true if we succeeded doing the backref lookup.
 * Return false if such lookup failed, and has to fallback to the old error message.
 */
static void print_data_reloc_error(const struct btrfs_inode *inode, u64 file_off,
				   const u8 *csum, const u8 *csum_expected,
				   int mirror_num)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_path path = { 0 };
	struct btrfs_key found_key = { 0 };
	struct extent_buffer *eb;
	struct btrfs_extent_item *ei;
	const u32 csum_size = fs_info->csum_size;
	u64 logical;
	u64 flags;
	u32 item_size;
	int ret;

	mutex_lock(&fs_info->reloc_mutex);
	logical = btrfs_get_reloc_bg_bytenr(fs_info);
	mutex_unlock(&fs_info->reloc_mutex);

	if (logical == U64_MAX) {
		btrfs_warn_rl(fs_info, "has data reloc tree but no running relocation");
		btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(inode->root), btrfs_ino(inode), file_off,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
		return;
	}

	logical += file_off;
	btrfs_warn_rl(fs_info,
"csum failed root %lld ino %llu off %llu logical %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
		btrfs_root_id(inode->root),
		btrfs_ino(inode), file_off, logical,
		CSUM_FMT_VALUE(csum_size, csum),
		CSUM_FMT_VALUE(csum_size, csum_expected),
		mirror_num);

	ret = extent_from_logical(fs_info, logical, &path, &found_key, &flags);
	if (ret < 0) {
		btrfs_err_rl(fs_info, "failed to lookup extent item for logical %llu: %d",
			     logical, ret);
		return;
	}
	eb = path.nodes[0];
	ei = btrfs_item_ptr(eb, path.slots[0], struct btrfs_extent_item);
	item_size = btrfs_item_size(eb, path.slots[0]);
	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		unsigned long ptr = 0;
		u64 ref_root;
		u8 ref_level;

		while (true) {
			ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
						      item_size, &ref_root,
						      &ref_level);
			if (ret < 0) {
				btrfs_warn_rl(fs_info,
				"failed to resolve tree backref for logical %llu: %d",
					      logical, ret);
				break;
			}
			if (ret > 0)
				break;

			btrfs_warn_rl(fs_info,
"csum error at logical %llu mirror %u: metadata %s (level %d) in tree %llu",
				logical, mirror_num,
				(ref_level ? "node" : "leaf"),
				ref_level, ref_root);
		}
		btrfs_release_path(&path);
	} else {
		struct btrfs_backref_walk_ctx ctx = { 0 };
		struct data_reloc_warn reloc_warn = { 0 };

		btrfs_release_path(&path);

		ctx.bytenr = found_key.objectid;
		ctx.extent_item_pos = logical - found_key.objectid;
		ctx.fs_info = fs_info;

		reloc_warn.logical = logical;
		reloc_warn.extent_item_size = found_key.offset;
		reloc_warn.mirror_num = mirror_num;
		reloc_warn.fs_info = fs_info;

		iterate_extent_inodes(&ctx, true,
				      data_reloc_print_warning_inode, &reloc_warn);
	}
}
static void __cold btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	const u32 csum_size = root->fs_info->csum_size;

	/* For data reloc tree, it's better to do a backref lookup instead. */
	if (btrfs_root_id(root) == BTRFS_DATA_RELOC_TREE_OBJECTID)
		return print_data_reloc_error(inode, logical_start, csum,
					      csum_expected, mirror_num);

	/* Output without objectid, which is more meaningful */
	if (btrfs_root_id(root) >= BTRFS_LAST_FREE_OBJECTID) {
		btrfs_warn_rl(root->fs_info,
"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	} else {
		btrfs_warn_rl(root->fs_info,
"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			btrfs_root_id(root), btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	}
}
/*
 * Lock inode i_rwsem based on arguments passed.
 *
 * ilock_flags can have the following bit set:
 *
 * BTRFS_ILOCK_SHARED - acquire a shared lock on the inode
 * BTRFS_ILOCK_TRY - try to acquire the lock, if fails on first attempt
 *		     return -EAGAIN
 * BTRFS_ILOCK_MMAP - acquire a write lock on the i_mmap_lock
 */
int btrfs_inode_lock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_SHARED) {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock_shared(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock_shared(&inode->vfs_inode);
	} else {
		if (ilock_flags & BTRFS_ILOCK_TRY) {
			if (!inode_trylock(&inode->vfs_inode))
				return -EAGAIN;
			else
				return 0;
		}
		inode_lock(&inode->vfs_inode);
	}
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		down_write(&inode->i_mmap_lock);
	return 0;
}
/*
 * Unlock inode i_rwsem.
 *
 * ilock_flags should contain the same bits set as passed to btrfs_inode_lock()
 * to decide whether the lock acquired is shared or exclusive.
 */
void btrfs_inode_unlock(struct btrfs_inode *inode, unsigned int ilock_flags)
{
	if (ilock_flags & BTRFS_ILOCK_MMAP)
		up_write(&inode->i_mmap_lock);
	if (ilock_flags & BTRFS_ILOCK_SHARED)
		inode_unlock_shared(&inode->vfs_inode);
	else
		inode_unlock(&inode->vfs_inode);
}
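
/*
 * Illustrative usage sketch (editor's example, not a caller from this file):
 * lock and unlock are paired with the same flag set, and the return value
 * must be checked when BTRFS_ILOCK_TRY is used, e.g.:
 *
 *	ret = btrfs_inode_lock(inode, BTRFS_ILOCK_SHARED | BTRFS_ILOCK_TRY);
 *	if (ret)
 *		return ret;	// -EAGAIN, only possible with BTRFS_ILOCK_TRY
 *	// ... read under the shared i_rwsem ...
 *	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
 */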
/*
 * Cleanup all submitted ordered extents in specified range to handle errors
 * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
 * and EXTENT_DELALLOC simultaneously, because that causes the reserved metadata
 * to be released, which we want to happen only when finishing the ordered
 * extent (btrfs_finish_ordered_io()).
 */
static inline void btrfs_cleanup_ordered_extents(struct btrfs_inode *inode,
						 struct folio *locked_folio,
						 u64 offset, u64 bytes)
{
	unsigned long index = offset >> PAGE_SHIFT;
	unsigned long end_index = (offset + bytes - 1) >> PAGE_SHIFT;
	u64 page_start = 0, page_end = 0;
	struct folio *folio;

	if (locked_folio) {
		page_start = folio_pos(locked_folio);
		page_end = page_start + folio_size(locked_folio) - 1;
	}

	while (index <= end_index) {
		/*
		 * For the locked folio, btrfs_mark_ordered_io_finished() will
		 * be called on it in run_delalloc_range() for the error
		 * handling, which will clear the folio Ordered bit and run
		 * the ordered extent accounting.
		 *
		 * Here we can't just clear the Ordered bit, or
		 * btrfs_mark_ordered_io_finished() would skip the accounting
		 * for the folio range, and the ordered extent will never finish.
		 */
		if (locked_folio && index == (page_start >> PAGE_SHIFT)) {
			index++;
			continue;
		}
		folio = filemap_get_folio(inode->vfs_inode.i_mapping, index);
		index++;
		if (IS_ERR(folio))
			continue;

		/*
		 * Here we just clear all Ordered bits for every folio in the
		 * range, then btrfs_mark_ordered_io_finished() will handle
		 * the ordered extent accounting for the range.
		 */
		btrfs_folio_clamp_clear_ordered(inode->root->fs_info, folio,
						offset, bytes);
		folio_put(folio);
	}

	if (locked_folio) {
		/* The locked folio covers the full range, nothing needs to be done */
		if (bytes + offset <= page_start + folio_size(locked_folio))
			return;
		/*
		 * In case this folio belongs to the delalloc range being
		 * instantiated then skip it, since the first folio of a range
		 * is going to be properly cleaned up by the caller of
		 * run_delalloc_range
		 */
		if (page_start >= offset && page_end <= (offset + bytes - 1)) {
			bytes = offset + bytes - folio_pos(locked_folio) -
				folio_size(locked_folio);
			offset = folio_pos(locked_folio) + folio_size(locked_folio);
		}
	}

	return btrfs_mark_ordered_io_finished(inode, NULL, offset, bytes, false);
}
static int btrfs_dirty_inode(struct btrfs_inode *inode);

static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct btrfs_new_inode_args *args)
{
	int err;

	if (args->default_acl) {
		err = __btrfs_set_acl(trans, args->inode, args->default_acl,
				      ACL_TYPE_DEFAULT);
		if (err)
			return err;
	}
	if (args->acl) {
		err = __btrfs_set_acl(trans, args->inode, args->acl, ACL_TYPE_ACCESS);
		if (err)
			return err;
	}
	if (!args->default_acl && !args->acl)
		cache_no_acl(args->inode);
	return btrfs_xattr_security_init(trans, args->inode, args->dir,
					 &args->dentry->d_name);
}
/*
 * This does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree.
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path,
				struct btrfs_inode *inode, bool extent_inserted,
				size_t size, size_t compressed_size,
				int compress_type,
				struct folio *compressed_folio,
				bool update_i_size)
{
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf;
	const u32 sectorsize = trans->fs_info->sectorsize;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	u64 i_size;

	/*
	 * The decompressed size must still be no larger than a sector.  Under
	 * heavy race, we can have size == 0 passed in, but that shouldn't be a
	 * big deal and we can continue the insertion.
	 */
	ASSERT(size <= sectorsize);

	/*
	 * The compressed size also needs to be no larger than a sector.
	 * That's also why we only need one page as the parameter.
	 */
	if (compressed_folio)
		ASSERT(compressed_size <= sectorsize);
	else
		ASSERT(compressed_size == 0);

	if (compressed_size && compressed_folio)
		cur_size = compressed_size;

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(inode);
		key.offset = 0;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		kaddr = kmap_local_folio(compressed_folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, compressed_size);
		kunmap_local(kaddr);

		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		struct folio *folio;

		folio = filemap_get_folio(inode->vfs_inode.i_mapping, 0);
		ASSERT(!IS_ERR(folio));
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_local_folio(folio, 0);
		write_extent_buffer(leaf, kaddr, ptr, size);
		kunmap_local(kaddr);
		folio_put(folio);
	}
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * We align size to sectorsize for inline extents just for simplicity
	 * sake.
	 */
	ret = btrfs_inode_set_file_extent_range(inode, 0,
					ALIGN(size, root->fs_info->sectorsize));
	if (ret)
		goto fail;

	/*
	 * We're an inline extent, so nobody can extend the file past i_size
	 * without locking a page we already have locked.
	 *
	 * We must do any i_size and inode updates before we unlock the pages.
	 * Otherwise we could end up racing with unlink.
	 */
	i_size = i_size_read(&inode->vfs_inode);
	if (update_i_size && size > i_size) {
		i_size_write(&inode->vfs_inode, size);
		i_size = size;
	}
	inode->disk_i_size = i_size;

fail:
	return ret;
}
static bool can_cow_file_range_inline(struct btrfs_inode *inode,
				      u64 offset, u64 size,
				      size_t compressed_size)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 data_len = (compressed_size ?: size);

	/* Inline extents must start at offset 0. */
	if (offset != 0)
		return false;

	/*
	 * Due to the page size limit, for subpage we can only trigger the
	 * writeback for the dirty sectors of page, that means data writeback
	 * is doing more writeback than what we want.
	 *
	 * This is especially unexpected for some call sites like fallocate,
	 * where we only increase i_size after everything is done.
	 * This means we can trigger inline extent even if we didn't want to.
	 * So here we skip inline extent creation completely.
	 */
	if (fs_info->sectorsize != PAGE_SIZE)
		return false;

	/* Inline extents are limited to sectorsize. */
	if (size > fs_info->sectorsize)
		return false;

	/* We cannot exceed the maximum inline data size. */
	if (data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return false;

	/* We cannot exceed the user specified max_inline size. */
	if (data_len > fs_info->max_inline)
		return false;

	/* Inline extents must be the entirety of the file. */
	if (size < i_size_read(&inode->vfs_inode))
		return false;

	return true;
}
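
/*
 * Worked example (editor's illustration, assuming a 4K sectorsize equal to
 * PAGE_SIZE): a 3000 byte file written at offset 0 passes every check above
 * (offset == 0, size <= sectorsize, data_len within both
 * BTRFS_MAX_INLINE_DATA_SIZE() and max_inline, and the range covers the
 * whole file), so it may be stored inline.  The same 3000 bytes written
 * into the middle of a larger file fail the offset and i_size checks and
 * go through the regular COW path instead.
 */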
/*
 * Conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 *
 * If being used directly, you must have already checked we're allowed to cow
 * the range by getting true from can_cow_file_range_inline().
 */
static noinline int __cow_file_range_inline(struct btrfs_inode *inode,
					    u64 size, size_t compressed_size,
					    int compress_type,
					    struct folio *compressed_folio,
					    bool update_i_size)
{
	struct btrfs_drop_extents_args drop_args = { 0 };
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 data_len = (compressed_size ?: size);
	int ret;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &inode->block_rsv;

	drop_args.path = path;
	drop_args.start = 0;
	drop_args.end = fs_info->sectorsize;
	drop_args.drop_cache = true;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = btrfs_file_extent_calc_inline_size(data_len);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = insert_inline_extent(trans, path, inode, drop_args.extent_inserted,
				   size, compressed_size, compress_type,
				   compressed_folio, update_i_size);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_update_inode_bytes(inode, size, drop_args.bytes_found);
	ret = btrfs_update_inode(trans, inode);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	btrfs_set_inode_full_sync(inode);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, NULL, 0, PAGE_SIZE, NULL);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
static noinline int cow_file_range_inline(struct btrfs_inode *inode,
					  struct folio *locked_folio,
					  u64 offset, u64 end,
					  size_t compressed_size,
					  int compress_type,
					  struct folio *compressed_folio,
					  bool update_i_size)
{
	struct extent_state *cached = NULL;
	unsigned long clear_flags = EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING | EXTENT_LOCKED;
	u64 size = min_t(u64, i_size_read(&inode->vfs_inode), end + 1);
	int ret;

	if (!can_cow_file_range_inline(inode, offset, size, compressed_size))
		return 1;

	lock_extent(&inode->io_tree, offset, end, &cached);
	ret = __cow_file_range_inline(inode, size, compressed_size,
				      compress_type, compressed_folio,
				      update_i_size);
	if (ret > 0) {
		unlock_extent(&inode->io_tree, offset, end, &cached);
		return ret;
	}

	/*
	 * In the successful case (ret == 0 here), cow_file_range will return 1.
	 *
	 * Quite a bit further up the callstack in extent_writepage(), ret == 1
	 * is treated as a short circuited success and does not unlock the folio,
	 * so we must do it here.
	 *
	 * In the failure case, the locked_folio does get unlocked by
	 * btrfs_folio_end_all_writers, which asserts that it is still locked
	 * at that point, so we must *not* unlock it here.
	 *
	 * The other two callsites in compress_file_range do not have a
	 * locked_folio, so they are not relevant to this logic.
	 */
	if (ret == 0)
		locked_folio = NULL;

	extent_clear_unlock_delalloc(inode, offset, end, locked_folio, &cached,
				     clear_flags, PAGE_UNLOCK |
				     PAGE_START_WRITEBACK | PAGE_END_WRITEBACK);
	return ret;
}
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct folio **folios;
	unsigned long nr_folios;
	int compress_type;
	struct list_head list;
};

struct async_chunk {
	struct btrfs_inode *inode;
	struct folio *locked_folio;
	u64 start;
	u64 end;
	blk_opf_t write_flags;
	struct list_head extents;
	struct cgroup_subsys_state *blkcg_css;
	struct btrfs_work work;
	struct async_cow *async_cow;
};

struct async_cow {
	atomic_t num_chunks;
	struct async_chunk chunks[];
};
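
/*
 * Illustrative note (editor's addition): run_delalloc_compressed() below
 * splits a delalloc range into 512K async_chunk entries, all embedded in a
 * single async_cow allocation; each chunk is compressed independently on
 * the work queue and its async_extent list is then submitted in order.
 */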
static noinline int add_async_extent(struct async_chunk *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct folio **folios,
				     unsigned long nr_folios,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	if (!async_extent)
		return -ENOMEM;
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->folios = folios;
	async_extent->nr_folios = nr_folios;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
/*
 * Check if the inode needs to be submitted to compression, based on mount
 * options, defragmentation, properties or heuristics.
 */
static inline int inode_need_compress(struct btrfs_inode *inode, u64 start,
				      u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (!btrfs_inode_can_compress(inode)) {
		WARN(IS_ENABLED(CONFIG_BTRFS_DEBUG),
			KERN_ERR "BTRFS: unexpected compression for ino %llu\n",
			btrfs_ino(inode));
		return 0;
	}
	/*
	 * Only enable sector perfect compression for experimental builds.
	 *
	 * This is a big feature change for subpage cases, and can hit
	 * different corner cases, so only limit this feature for
	 * experimental build for now.
	 *
	 * ETA for moving this out of experimental builds is 6.15.
	 */
	if (fs_info->sectorsize < PAGE_SIZE &&
	    !IS_ENABLED(CONFIG_BTRFS_EXPERIMENTAL)) {
		if (!PAGE_ALIGNED(start) ||
		    !PAGE_ALIGNED(end + 1))
			return 0;
	}

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* defrag ioctl */
	if (inode->defrag_compress)
		return 1;
	/* bad compression ratios */
	if (inode->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    inode->flags & BTRFS_INODE_COMPRESS ||
	    inode->prop_compress)
		return btrfs_compress_heuristic(inode, start, end);
	return 0;
}
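
/*
 * Decision summary (editor's illustration): with "mount -o compress=zstd" a
 * plain inode reaches btrfs_compress_heuristic(), which samples the data
 * before committing CPU time; with compress-force (FORCE_COMPRESS) the
 * heuristic is bypassed and 1 is returned unconditionally.  Once a bad
 * ratio sets BTRFS_INODE_NOCOMPRESS, only FORCE_COMPRESS, a defrag request
 * or a compression property gets past that flag again.
 */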
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u32 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(inode, small_write);
}
static int extent_range_clear_dirty_for_io(struct inode *inode, u64 start, u64 end)
{
	unsigned long end_index = end >> PAGE_SHIFT;
	struct folio *folio;
	int ret = 0;

	for (unsigned long index = start >> PAGE_SHIFT;
	     index <= end_index; index++) {
		folio = filemap_get_folio(inode->i_mapping, index);
		if (IS_ERR(folio)) {
			if (!ret)
				ret = PTR_ERR(folio);
			continue;
		}
		btrfs_folio_clamp_clear_dirty(inode_to_fs_info(inode), folio, start,
					      end + 1 - start);
		folio_put(folio);
	}
	return ret;
}
/*
 * Work queue callback to start compression on a file and pages.
 *
 * This is done inside an ordered work queue, and the compression is spread
 * across many cpus.  The actual IO submission is step two, and the ordered work
 * queue takes care of making sure that happens in the same order things were
 * put onto the queue by writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an entry onto the
 * work queue to write the uncompressed bytes.  This makes sure that both
 * compressed inodes and uncompressed inodes are written in the same order that
 * the flusher thread sent them down.
 */
static void compress_file_range(struct btrfs_work *work)
{
	struct async_chunk *async_chunk =
		container_of(work, struct async_chunk, work);
	struct btrfs_inode *inode = async_chunk->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	u64 blocksize = fs_info->sectorsize;
	u64 start = async_chunk->start;
	u64 end = async_chunk->end;
	u64 actual_end;
	u64 i_size;
	int ret = 0;
	struct folio **folios;
	unsigned long nr_folios;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	unsigned int poff;
	int i;
	int compress_type = fs_info->compress_type;

	inode_should_defrag(inode, start, end, end - start + 1, SZ_16K);

	/*
	 * We need to call clear_page_dirty_for_io on each page in the range.
	 * Otherwise applications with the file mmap'd can wander in and change
	 * the page contents while we are compressing them.
	 */
	ret = extent_range_clear_dirty_for_io(&inode->vfs_inode, start, end);

	/*
	 * All the folios should have been locked thus no failure.
	 *
	 * And even if some folios are missing, btrfs_compress_folios()
	 * would handle them correctly, so here just do an ASSERT() check for
	 * early logic errors.
	 */
	ASSERT(ret == 0);

	/*
	 * We need to save i_size before now because it could change in between
	 * us evaluating the size and assigning it.  This is because we lock and
	 * unlock the page in truncate and fallocate, and then modify the i_size
	 * later on.
	 *
	 * The barriers are to emulate READ_ONCE, remove that once i_size_read
	 * is safe against concurrent truncate.
	 */
	barrier();
	i_size = i_size_read(&inode->vfs_inode);
	barrier();
	actual_end = min_t(u64, i_size, end + 1);

again:
	folios = NULL;
	nr_folios = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	nr_folios = min_t(unsigned long, nr_folios, BTRFS_MAX_COMPRESSED_PAGES);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * Skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < inode->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	total_in = 0;
	ret = 0;

	/*
	 * We do compression for mount -o compress and when the inode has not
	 * been flagged as NOCOMPRESS.  This flag can change at any time if we
	 * discover bad compression ratios.
	 */
	if (!inode_need_compress(inode, start, end))
		goto cleanup_and_bail_uncompressed;

	folios = kcalloc(nr_folios, sizeof(struct folio *), GFP_NOFS);
	if (!folios) {
		/*
		 * Memory allocation failure is not a fatal error, we can fall
		 * back to uncompressed code.
		 */
		goto cleanup_and_bail_uncompressed;
	}

	if (inode->defrag_compress)
		compress_type = inode->defrag_compress;
	else if (inode->prop_compress)
		compress_type = inode->prop_compress;

	/* Compression level is applied here. */
	ret = btrfs_compress_folios(compress_type | (fs_info->compress_level << 4),
				    mapping, start, folios, &nr_folios, &total_in,
				    &total_compressed);
	if (ret)
		goto mark_incompressible;

	/*
	 * Zero the tail end of the last page, as we might be sending it down
	 * to disk.
	 */
	poff = offset_in_page(total_compressed);
	if (poff)
		folio_zero_range(folios[nr_folios - 1], poff, PAGE_SIZE - poff);

	/*
	 * Try to create an inline extent.
	 *
	 * If we didn't compress the entire range, try to create an uncompressed
	 * inline extent, else a compressed one.
	 *
	 * Check cow_file_range() for why we don't even try to create inline
	 * extent for the subpage case.
	 */
	if (total_in < actual_end)
		ret = cow_file_range_inline(inode, NULL, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
	else
		ret = cow_file_range_inline(inode, NULL, start, end, total_compressed,
					    compress_type, folios[0], false);
	if (ret <= 0) {
		if (ret < 0)
			mapping_set_error(mapping, -EIO);
		goto free_pages;
	}

	/*
	 * We aren't doing an inline extent.  Round the compressed size up to a
	 * block size boundary so the allocator does sane things.
	 */
	total_compressed = ALIGN(total_compressed, blocksize);

	/*
	 * One last check to make sure the compression is really a win, compare
	 * the page count read with the blocks on disk, compression must free at
	 * least one sector size.
	 */
	total_in = round_up(total_in, fs_info->sectorsize);
	if (total_compressed + blocksize > total_in)
		goto mark_incompressible;

	/*
	 * The async work queues will take care of doing actual allocation on
	 * disk for these compressed pages, and will submit the bios.
	 */
	ret = add_async_extent(async_chunk, start, total_in, total_compressed, folios,
			       nr_folios, compress_type);
	BUG_ON(ret);
	if (start + total_in < end) {
		start += total_in;
		cond_resched();
		goto again;
	}
	return;

mark_incompressible:
	if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) && !inode->prop_compress)
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
cleanup_and_bail_uncompressed:
	ret = add_async_extent(async_chunk, start, end - start + 1, 0, NULL, 0,
			       BTRFS_COMPRESS_NONE);
	BUG_ON(ret);
free_pages:
	if (folios) {
		for (i = 0; i < nr_folios; i++) {
			WARN_ON(folios[i]->mapping);
			btrfs_free_compr_folio(folios[i]);
		}
		kfree(folios);
	}
}
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->folios)
		return;

	for (i = 0; i < async_extent->nr_folios; i++) {
		WARN_ON(async_extent->folios[i]->mapping);
		btrfs_free_compr_folio(async_extent->folios[i]);
	}
	kfree(async_extent->folios);
	async_extent->nr_folios = 0;
	async_extent->folios = NULL;
}
static void submit_uncompressed_range(struct btrfs_inode *inode,
				      struct async_extent *async_extent,
				      struct folio *locked_folio)
{
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;
	int ret;
	struct writeback_control wbc = {
		.sync_mode		= WB_SYNC_ALL,
		.range_start		= start,
		.range_end		= end,
		.no_cgroup_owner	= 1,
	};

	wbc_attach_fdatawrite_inode(&wbc, &inode->vfs_inode);
	ret = run_delalloc_cow(inode, locked_folio, start, end,
			       &wbc, false);
	wbc_detach_inode(&wbc);
	if (ret < 0) {
		btrfs_cleanup_ordered_extents(inode, locked_folio,
					      start, end - start + 1);
		if (locked_folio) {
			const u64 page_start = folio_pos(locked_folio);

			folio_start_writeback(locked_folio);
			folio_end_writeback(locked_folio);
			btrfs_mark_ordered_io_finished(inode, locked_folio,
						       page_start, PAGE_SIZE,
						       !ret);
			mapping_set_error(locked_folio->mapping, ret);
			folio_unlock(locked_folio);
		}
	}
}
static void submit_one_async_extent(struct async_chunk *async_chunk,
				    struct async_extent *async_extent,
				    u64 *alloc_hint)
{
	struct btrfs_inode *inode = async_chunk->inode;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	struct btrfs_key ins;
	struct folio *locked_folio = NULL;
	struct extent_state *cached = NULL;
	struct extent_map *em;
	int ret = 0;
	u64 start = async_extent->start;
	u64 end = async_extent->start + async_extent->ram_size - 1;

	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(async_chunk->blkcg_css);

	/*
	 * If async_chunk->locked_folio is in the async_extent range, we need to
	 * handle it.
	 */
	if (async_chunk->locked_folio) {
		u64 locked_folio_start = folio_pos(async_chunk->locked_folio);
		u64 locked_folio_end = locked_folio_start +
			folio_size(async_chunk->locked_folio) - 1;

		if (!(start >= locked_folio_end || end <= locked_folio_start))
			locked_folio = async_chunk->locked_folio;
	}

	if (async_extent->compress_type == BTRFS_COMPRESS_NONE) {
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	ret = btrfs_reserve_extent(root, async_extent->ram_size,
				   async_extent->compressed_size,
				   async_extent->compressed_size,
				   0, *alloc_hint, &ins, 1, 1);
	if (ret) {
		/*
		 * We can't reserve contiguous space for the compressed size.
		 * Unlikely, but it's possible that we could have enough
		 * non-contiguous space for the uncompressed size instead.  So
		 * fall back to uncompressed.
		 */
		submit_uncompressed_range(inode, async_extent, locked_folio);
		goto done;
	}

	lock_extent(io_tree, start, end, &cached);

	/* Here we're doing allocation and writeback of the compressed pages */
	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.ram_bytes = async_extent->ram_size;
	file_extent.num_bytes = async_extent->ram_size;
	file_extent.offset = 0;
	file_extent.compression = async_extent->compress_type;

	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserve;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     1 << BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserve;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	/* Clear dirty, set writeback and unlock the pages. */
	extent_clear_unlock_delalloc(inode, start, end,
			NULL, &cached, EXTENT_LOCKED | EXTENT_DELALLOC,
			PAGE_UNLOCK | PAGE_START_WRITEBACK);
	btrfs_submit_compressed_write(ordered,
			    async_extent->folios,	/* compressed_folios */
			    async_extent->nr_folios,
			    async_chunk->write_flags, true);
	*alloc_hint = ins.objectid + ins.offset;
done:
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	kfree(async_extent);
	return;

out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
	mapping_set_error(inode->vfs_inode.i_mapping, -EIO);
	extent_clear_unlock_delalloc(inode, start, end,
				     NULL, &cached,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DELALLOC_NEW |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_START_WRITEBACK |
				     PAGE_END_WRITEBACK);
	free_async_extent_pages(async_extent);
	if (async_chunk->blkcg_css)
		kthread_associate_blkcg(NULL);
	btrfs_debug(fs_info,
"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
		    btrfs_root_id(root), btrfs_ino(inode), start,
		    async_extent->ram_size, ret);
	kfree(async_extent);
}
u64 btrfs_get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,
				     u64 num_bytes)
{
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->disk_bytenr >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->disk_bytenr < EXTENT_MAP_LAST_BYTE)
				alloc_hint = extent_map_block_start(em);
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = extent_map_block_start(em);
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
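
/*
 * Usage sketch (editor's illustration): cow_file_range() below feeds the
 * returned hint straight into the allocator so that newly allocated extents
 * land near the file's existing ones:
 *
 *	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);
 *	ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
 *				   min_alloc_size, 0, alloc_hint, &ins, 1, 1);
 */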
/*
 * When extent_io.c finds a delayed allocation range in the file,
 * the callbacks end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_folio is the folio that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * When this function fails, it unlocks all pages except @locked_folio.
 *
 * When this function successfully creates an inline extent, it returns 1 and
 * unlocks all pages including locked_folio and starts I/O on them.
 * (In reality inline extents are limited to a single page, so locked_folio is
 * the only page handled anyway).
 *
 * When this function succeeds and creates a normal extent, the page locking
 * status depends on the passed in flags:
 *
 * - If @keep_locked is set, all pages are kept locked.
 * - Else all pages except for @locked_folio are unlocked.
 *
 * When a failure happens in the second or later iteration of the
 * while-loop, the ordered extents created in previous iterations are kept
 * intact.  So, the caller must clean them up by calling
 * btrfs_cleanup_ordered_extents().  See btrfs_run_delalloc_range() for
 * example.
 */
static noinline int cow_file_range(struct btrfs_inode *inode,
				   struct folio *locked_folio, u64 start,
				   u64 end, u64 *done_offset,
				   bool keep_locked, bool no_inline)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_state *cached = NULL;
	u64 alloc_hint = 0;
	u64 orig_start = start;
	u64 num_bytes;
	u64 cur_alloc_size = 0;
	u64 min_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned clear_bits;
	unsigned long page_ops;
	int ret = 0;

	if (btrfs_is_free_space_inode(inode)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	ASSERT(num_bytes <= btrfs_super_total_bytes(fs_info->super_copy));

	inode_should_defrag(inode, start, end, num_bytes, SZ_64K);

	if (!no_inline) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(inode, locked_folio, start, end, 0,
					    BTRFS_COMPRESS_NONE, NULL, false);
		if (ret <= 0) {
			/*
			 * We succeeded, return 1 so the caller knows we're done
			 * with this page and already handled the IO.
			 *
			 * If there was an error then cow_file_range_inline() has
			 * already done the cleanup.
			 */
			if (ret == 0)
				ret = 1;
			goto done;
		}
	}

	alloc_hint = btrfs_get_extent_allocation_hint(inode, start, num_bytes);

	/*
	 * Relocation relies on the relocated extents to have exactly the same
	 * size as the original extents.  Normally writeback for relocation data
	 * extents follows a NOCOW path because relocation preallocates the
	 * extents.  However, due to an operation such as scrub turning a block
	 * group to RO mode, it may fallback to COW mode, so we must make sure
	 * an extent allocated during COW has exactly the requested size and can
	 * not be split into smaller extents, otherwise relocation breaks and
	 * fails during the stage where it updates the bytenr of file extent
	 * items.
	 */
	if (btrfs_is_data_reloc_root(root))
		min_alloc_size = num_bytes;
	else
		min_alloc_size = fs_info->sectorsize;

	while (num_bytes > 0) {
		struct btrfs_ordered_extent *ordered;
		struct btrfs_file_extent file_extent;

		ret = btrfs_reserve_extent(root, num_bytes, num_bytes,
					   min_alloc_size, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret == -EAGAIN) {
			/*
			 * btrfs_reserve_extent only returns -EAGAIN for zoned
			 * file systems, which is an indication that there are
			 * no active zones to allocate from at the moment.
			 *
			 * If this is the first loop iteration, wait for at
			 * least one zone to finish before retrying the
			 * allocation.  Otherwise ask the caller to write out
			 * the already allocated blocks before coming back to
			 * us, or return -ENOSPC if it can't handle retries.
			 */
			ASSERT(btrfs_is_zoned(fs_info));
			if (start == orig_start) {
				wait_on_bit_io(&inode->root->fs_info->flags,
					       BTRFS_FS_NEED_ZONE_FINISH,
					       TASK_UNINTERRUPTIBLE);
				continue;
			}
			if (done_offset) {
				*done_offset = start - 1;
				return 0;
			}
			ret = -ENOSPC;
		}
		if (ret < 0)
			goto out_unlock;
		cur_alloc_size = ins.offset;

		file_extent.disk_bytenr = ins.objectid;
		file_extent.disk_num_bytes = ins.offset;
		file_extent.num_bytes = ins.offset;
		file_extent.ram_bytes = ins.offset;
		file_extent.offset = 0;
		file_extent.compression = BTRFS_COMPRESS_NONE;

		lock_extent(&inode->io_tree, start, start + cur_alloc_size - 1,
			    &cached);

		em = btrfs_create_io_em(inode, start, &file_extent,
					BTRFS_ORDERED_REGULAR);
		if (IS_ERR(em)) {
			unlock_extent(&inode->io_tree, start,
				      start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(em);
			goto out_reserve;
		}
		free_extent_map(em);

		ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
						     1 << BTRFS_ORDERED_REGULAR);
		if (IS_ERR(ordered)) {
			unlock_extent(&inode->io_tree, start,
				      start + cur_alloc_size - 1, &cached);
			ret = PTR_ERR(ordered);
			goto out_drop_extent_cache;
		}

		if (btrfs_is_data_reloc_root(root)) {
			ret = btrfs_reloc_clone_csums(ordered);
			/*
			 * Only drop cache here, and process as normal.
			 *
			 * We must not allow extent_clear_unlock_delalloc()
			 * at out_unlock label to free meta of this ordered
			 * extent, as its meta should be freed by
			 * btrfs_finish_ordered_io().
			 *
			 * So we must continue until @start is increased to
			 * skip current ordered extent.
			 */
			if (ret)
				btrfs_drop_extent_map_range(inode, start,
							    start + cur_alloc_size - 1,
							    false);
		}
		btrfs_put_ordered_extent(ordered);

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * We're not doing compressed IO, don't unlock the first page
		 * (which the caller expects to stay locked), don't clear any
		 * dirty bits and don't set any writeback bits
		 *
		 * Do set the Ordered flag so we know this page was
		 * properly setup for writepage.
		 */
		page_ops = (keep_locked ? 0 : PAGE_UNLOCK);
		page_ops |= PAGE_SET_ORDERED;

		extent_clear_unlock_delalloc(inode, start, start + cur_alloc_size - 1,
					     locked_folio, &cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     page_ops);
		if (num_bytes < cur_alloc_size)
			num_bytes = 0;
		else
			num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
		cur_alloc_size = 0;

		/*
		 * btrfs_reloc_clone_csums() error, since start is increased
		 * extent_clear_unlock_delalloc() at out_unlock label won't
		 * free metadata of current ordered extent, we're OK to exit.
		 */
		if (ret)
			goto out_unlock;
	}
done:
	if (done_offset)
		*done_offset = end;
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_map_range(inode, start, start + cur_alloc_size - 1, false);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	/*
	 * Now, we have three regions to clean up:
	 *
	 * |-------(1)----|---(2)---|-------------(3)----------|
	 * `- orig_start  `- start  `- start + cur_alloc_size  `- end
	 *
	 * We process each region below.
	 */

	clear_bits = EXTENT_LOCKED | EXTENT_DELALLOC | EXTENT_DELALLOC_NEW |
		EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV;
	page_ops = PAGE_UNLOCK | PAGE_START_WRITEBACK | PAGE_END_WRITEBACK;

	/*
	 * For the range (1).  We have already instantiated the ordered extents
	 * for this region.  They are cleaned up by
	 * btrfs_cleanup_ordered_extents() in e.g,
	 * btrfs_run_delalloc_range().  EXTENT_LOCKED | EXTENT_DELALLOC are
	 * already cleared in the above loop.  And, EXTENT_DELALLOC_NEW |
	 * EXTENT_DEFRAG | EXTENT_CLEAR_META_RESV are handled by the cleanup
	 * function.
	 *
	 * However, in case of @keep_locked, we still need to unlock the pages
	 * (except @locked_folio) to ensure all the pages are unlocked.
	 */
	if (keep_locked && orig_start < start) {
		if (!locked_folio)
			mapping_set_error(inode->vfs_inode.i_mapping, ret);
		extent_clear_unlock_delalloc(inode, orig_start, start - 1,
					     locked_folio, NULL, 0, page_ops);
	}

	/*
	 * At this point we're unlocked, we want to make sure we're only
	 * clearing these flags under the extent lock, so lock the rest of the
	 * range and clear everything up.
	 */
	lock_extent(&inode->io_tree, start, end, NULL);

	/*
	 * For the range (2).  If we reserved an extent for our delalloc range
	 * (or a subrange) and failed to create the respective ordered extent,
	 * then it means that when we reserved the extent we decremented the
	 * extent's size from the data space_info's bytes_may_use counter and
	 * incremented the space_info's bytes_reserved counter by the same
	 * amount.  We must make sure extent_clear_unlock_delalloc() does not try
	 * to decrement again the data space_info's bytes_may_use counter,
	 * therefore we do not pass it the flag EXTENT_CLEAR_DATA_RESV.
	 */
	if (cur_alloc_size) {
		extent_clear_unlock_delalloc(inode, start,
					     start + cur_alloc_size - 1,
					     locked_folio, &cached, clear_bits,
					     page_ops);
		btrfs_qgroup_free_data(inode, NULL, start, cur_alloc_size, NULL);
	}

	/*
	 * For the range (3).  We never touched the region.  In addition to the
	 * clear_bits above, we add EXTENT_CLEAR_DATA_RESV to release the data
	 * space_info's bytes_may_use counter, reserved in
	 * btrfs_check_data_free_space().
	 */
	if (start + cur_alloc_size < end) {
		clear_bits |= EXTENT_CLEAR_DATA_RESV;
		extent_clear_unlock_delalloc(inode, start + cur_alloc_size, end,
					     locked_folio,
					     &cached, clear_bits, page_ops);
		btrfs_qgroup_free_data(inode, NULL, start + cur_alloc_size,
				       end - start - cur_alloc_size + 1, NULL);
	}
	return ret;
}
/*
 * Phase two of compressed writeback.  This is the ordered portion of the code,
 * which only gets called in the order the work was queued.  We walk all the
 * async extents created by compress_file_range and send them down to the disk.
 *
 * If called with @do_free == true then it'll try to finish the work and free
 * the work struct eventually.
 */
static noinline void submit_compressed_extents(struct btrfs_work *work, bool do_free)
{
	struct async_chunk *async_chunk = container_of(work, struct async_chunk,
						       work);
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	struct async_extent *async_extent;
	unsigned long nr_pages;
	u64 alloc_hint = 0;

	if (do_free) {
		struct async_cow *async_cow;

		btrfs_add_delayed_iput(async_chunk->inode);
		if (async_chunk->blkcg_css)
			css_put(async_chunk->blkcg_css);

		async_cow = async_chunk->async_cow;
		if (atomic_dec_and_test(&async_cow->num_chunks))
			kvfree(async_cow);
		return;
	}

	nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	while (!list_empty(&async_chunk->extents)) {
		async_extent = list_entry(async_chunk->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);
		submit_one_async_extent(async_chunk, async_extent, &alloc_hint);
	}

	/* atomic_sub_return implies a barrier */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M)
		cond_wake_up_nomb(&fs_info->async_submit_wait);
}
static bool run_delalloc_compressed(struct btrfs_inode *inode,
				    struct folio *locked_folio, u64 start,
				    u64 end, struct writeback_control *wbc)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct cgroup_subsys_state *blkcg_css = wbc_blkcg_css(wbc);
	struct async_cow *ctx;
	struct async_chunk *async_chunk;
	unsigned long nr_pages;
	u64 num_chunks = DIV_ROUND_UP(end - start, SZ_512K);
	int i;
	unsigned nofs_flag;
	const blk_opf_t write_flags = wbc_to_write_flags(wbc);

	nofs_flag = memalloc_nofs_save();
	ctx = kvmalloc(struct_size(ctx, chunks, num_chunks), GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);
	if (!ctx)
		return false;

	set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, &inode->runtime_flags);

	async_chunk = ctx->chunks;
	atomic_set(&ctx->num_chunks, num_chunks);

	for (i = 0; i < num_chunks; i++) {
		u64 cur_end = min(end, start + SZ_512K - 1);

		/*
		 * igrab is called higher up in the call chain, take only the
		 * lightweight reference for the callback lifetime
		 */
		ihold(&inode->vfs_inode);
		async_chunk[i].async_cow = ctx;
		async_chunk[i].inode = inode;
		async_chunk[i].start = start;
		async_chunk[i].end = cur_end;
		async_chunk[i].write_flags = write_flags;
		INIT_LIST_HEAD(&async_chunk[i].extents);

		/*
		 * The locked_folio comes all the way from writepage and its
		 * the original folio we were actually given.  As we spread
		 * this large delalloc region across multiple async_chunk
		 * structs, only the first struct needs a pointer to
		 * locked_folio.
		 *
		 * This way we don't need racey decisions about who is supposed
		 * to unlock it.
		 */
		if (locked_folio) {
			/*
			 * Depending on the compressibility, the pages might or
			 * might not go through async.  We want all of them to
			 * be accounted against wbc once.  Let's do it here
			 * before the paths diverge.  wbc accounting is used
			 * only for foreign writeback detection and doesn't
			 * need full accuracy.  Just account the whole thing
			 * against the first page.
			 */
			wbc_account_cgroup_owner(wbc, locked_folio,
						 cur_end - start);
			async_chunk[i].locked_folio = locked_folio;
			locked_folio = NULL;
		} else {
			async_chunk[i].locked_folio = NULL;
		}

		if (blkcg_css != blkcg_root_css) {
			css_get(blkcg_css);
			async_chunk[i].blkcg_css = blkcg_css;
			async_chunk[i].write_flags |= REQ_BTRFS_CGROUP_PUNT;
		} else {
			async_chunk[i].blkcg_css = NULL;
		}

		btrfs_init_work(&async_chunk[i].work, compress_file_range,
				submit_compressed_extents);

		nr_pages = DIV_ROUND_UP(cur_end - start, PAGE_SIZE);
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_chunk[i].work);

		start = cur_end + 1;
	}
	return true;
}
/*
 * Run the delalloc range from start to end, and write back any dirty pages
 * covered by the range.
 */
static noinline int run_delalloc_cow(struct btrfs_inode *inode,
				     struct folio *locked_folio, u64 start,
				     u64 end, struct writeback_control *wbc,
				     bool pages_dirty)
{
	u64 done_offset = end;
	int ret;

	while (start <= end) {
		ret = cow_file_range(inode, locked_folio, start, end,
				     &done_offset, true, false);
		if (ret)
			return ret;
		extent_write_locked_range(&inode->vfs_inode, locked_folio,
					  start, done_offset, wbc, pages_dirty);
		start = done_offset + 1;
	}

	return 1;
}
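
/*
 * Illustrative note (editor's addition): on a zoned filesystem
 * cow_file_range() can stop early and report the last allocated byte
 * through @done_offset, so the loop above writes out [start, done_offset]
 * with extent_write_locked_range() and retries from done_offset + 1 until
 * the whole delalloc range is covered.
 */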
static int fallback_to_cow(struct btrfs_inode *inode,
			   struct folio *locked_folio, const u64 start,
			   const u64 end)
{
	const bool is_space_ino = btrfs_is_free_space_inode(inode);
	const bool is_reloc_ino = btrfs_is_data_reloc_root(inode->root);
	const u64 range_bytes = end + 1 - start;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 range_start = start;
	u64 count;
	int ret;

	/*
	 * If EXTENT_NORESERVE is set it means that when the buffered write was
	 * made we had not enough available data space and therefore we did not
	 * reserve data space for it, since we thought we could do NOCOW for the
	 * respective file range (either there is prealloc extent or the inode
	 * has the NOCOW bit set).
	 *
	 * However when we need to fallback to COW mode (because for example the
	 * block group for the corresponding extent was turned to RO mode by a
	 * scrub or relocation) we need to do the following:
	 *
	 * 1) We increment the bytes_may_use counter of the data space info.
	 *    If COW succeeds, it allocates a new data extent and after doing
	 *    that it decrements the space info's bytes_may_use counter and
	 *    increments its bytes_reserved counter by the same amount (we do
	 *    this at btrfs_add_reserved_bytes()).  So we need to increment the
	 *    bytes_may_use counter to compensate (when space is reserved at
	 *    buffered write time, the bytes_may_use counter is incremented);
	 *
	 * 2) We clear the EXTENT_NORESERVE bit from the range.  We do this so
	 *    that if the COW path fails for any reason, it decrements (through
	 *    extent_clear_unlock_delalloc()) the bytes_may_use counter of the
	 *    data space info, which we incremented in the step above.
	 *
	 * If we need to fallback to cow and the inode corresponds to a free
	 * space cache inode or an inode of the data relocation tree, we must
	 * also increment bytes_may_use of the data space_info for the same
	 * reason.  Space caches and relocated data extents always get a prealloc
	 * extent for them, however scrub or balance may have set the block
	 * group that contains that extent to RO mode and therefore force COW
	 * when starting writeback.
	 */
	lock_extent(io_tree, start, end, &cached_state);
	count = count_range_bits(io_tree, &range_start, end, range_bytes,
				 EXTENT_NORESERVE, 0, NULL);
	if (count > 0 || is_space_ino || is_reloc_ino) {
		u64 bytes = count;
		struct btrfs_fs_info *fs_info = inode->root->fs_info;
		struct btrfs_space_info *sinfo = fs_info->data_sinfo;

		if (is_space_ino || is_reloc_ino)
			bytes = range_bytes;

		spin_lock(&sinfo->lock);
		btrfs_space_info_update_bytes_may_use(fs_info, sinfo, bytes);
		spin_unlock(&sinfo->lock);

		if (count > 0)
			clear_extent_bit(io_tree, start, end, EXTENT_NORESERVE,
					 NULL);
	}
	unlock_extent(io_tree, start, end, &cached_state);

	/*
	 * Don't try to create inline extents, as a mix of inline extent that
	 * is written out and unlocked directly and a normal NOCOW extent
	 * doesn't work.
	 */
	ret = cow_file_range(inode, locked_folio, start, end, NULL, false,
			     true);
	ASSERT(ret != 1);
	return ret;
}
{
1835 /* Start file offset of the range we want to NOCOW. */
1837 /* End file offset (inclusive) of the range we want to NOCOW. */
1839 bool writeback_path
;
1842 * Free the path passed to can_nocow_file_extent() once it's not needed
1848 * Output fields. Only set when can_nocow_file_extent() returns 1.
1849 * The expected file extent for the NOCOW write.
1851 struct btrfs_file_extent file_extent
;
/*
 * Check if we can NOCOW the file extent that the path points to.
 * This function may return with the path released, so the caller should check
 * if path->nodes[0] is NULL or not if it needs to use the path afterwards.
 *
 * Returns: < 0 on error
 *            0 if we can not NOCOW
 *            1 if we can NOCOW
 */
1863 static int can_nocow_file_extent(struct btrfs_path
*path
,
1864 struct btrfs_key
*key
,
1865 struct btrfs_inode
*inode
,
1866 struct can_nocow_file_extent_args
*args
)
1868 const bool is_freespace_inode
= btrfs_is_free_space_inode(inode
);
1869 struct extent_buffer
*leaf
= path
->nodes
[0];
1870 struct btrfs_root
*root
= inode
->root
;
1871 struct btrfs_file_extent_item
*fi
;
1872 struct btrfs_root
*csum_root
;
1878 bool nowait
= path
->nowait
;
1880 fi
= btrfs_item_ptr(leaf
, path
->slots
[0], struct btrfs_file_extent_item
);
1881 extent_type
= btrfs_file_extent_type(leaf
, fi
);
1883 if (extent_type
== BTRFS_FILE_EXTENT_INLINE
)
1886 if (!(inode
->flags
& BTRFS_INODE_NODATACOW
) &&
1887 extent_type
== BTRFS_FILE_EXTENT_REG
)
1891 * If the extent was created before the generation where the last snapshot
1892 * for its subvolume was created, then this implies the extent is shared,
1893 * hence we must COW.
1895 if (!args
->strict
&&
1896 btrfs_file_extent_generation(leaf
, fi
) <=
1897 btrfs_root_last_snapshot(&root
->root_item
))
1900 /* An explicit hole, must COW. */
1901 if (btrfs_file_extent_disk_bytenr(leaf
, fi
) == 0)
1904 /* Compressed/encrypted/encoded extents must be COWed. */
1905 if (btrfs_file_extent_compression(leaf
, fi
) ||
1906 btrfs_file_extent_encryption(leaf
, fi
) ||
1907 btrfs_file_extent_other_encoding(leaf
, fi
))
1910 extent_end
= btrfs_file_extent_end(path
);
1912 args
->file_extent
.disk_bytenr
= btrfs_file_extent_disk_bytenr(leaf
, fi
);
1913 args
->file_extent
.disk_num_bytes
= btrfs_file_extent_disk_num_bytes(leaf
, fi
);
1914 args
->file_extent
.ram_bytes
= btrfs_file_extent_ram_bytes(leaf
, fi
);
1915 args
->file_extent
.offset
= btrfs_file_extent_offset(leaf
, fi
);
1916 args
->file_extent
.compression
= btrfs_file_extent_compression(leaf
, fi
);
1919 * The following checks can be expensive, as they need to take other
1920 * locks and do btree or rbtree searches, so release the path to avoid
1921 * blocking other tasks for too long.
1923 btrfs_release_path(path
);
1925 ret
= btrfs_cross_ref_exist(root
, btrfs_ino(inode
),
1926 key
->offset
- args
->file_extent
.offset
,
1927 args
->file_extent
.disk_bytenr
, args
->strict
, path
);
1928 WARN_ON_ONCE(ret
> 0 && is_freespace_inode
);
1932 if (args
->free_path
) {
1934 * We don't need the path anymore, plus through the
1935 * btrfs_lookup_csums_list() call below we will end up allocating
1936 * another path. So free the path to avoid unnecessary extra
1939 btrfs_free_path(path
);
1943 /* If there are pending snapshots for this root, we must COW. */
1944 if (args
->writeback_path
&& !is_freespace_inode
&&
1945 atomic_read(&root
->snapshot_force_cow
))
1948 args
->file_extent
.num_bytes
= min(args
->end
+ 1, extent_end
) - args
->start
;
1949 args
->file_extent
.offset
+= args
->start
- key
->offset
;
1950 io_start
= args
->file_extent
.disk_bytenr
+ args
->file_extent
.offset
;
1953 * Force COW if csums exist in the range. This ensures that csums for a
1954 * given extent are either valid or do not exist.
1957 csum_root
= btrfs_csum_root(root
->fs_info
, io_start
);
1958 ret
= btrfs_lookup_csums_list(csum_root
, io_start
,
1959 io_start
+ args
->file_extent
.num_bytes
- 1,
1961 WARN_ON_ONCE(ret
> 0 && is_freespace_inode
);
1967 if (args
->free_path
&& path
)
1968 btrfs_free_path(path
);
1970 return ret
< 0 ? ret
: can_nocow
;
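
/*
 * Worked example of the range math above (illustrative numbers, not from the
 * original source): suppose the file extent item at key->offset == 0 covers
 * [0, 1M) and the caller asks to NOCOW [4K, 64K), i.e. args->start == 4K,
 * args->end == 64K - 1 and extent_end == 1M. Then:
 *
 *	num_bytes = min(args->end + 1, extent_end) - args->start = 60K
 *	offset   += args->start - key->offset  (skip the 4K at the front)
 *	io_start  = disk_bytenr + offset       (physical start of the write)
 *
 * so the csum lookup above inspects exactly the 60K of physical bytes that
 * the NOCOW write would overwrite in place.
 */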

/*
 * Called for NOCOW writeback. This checks for snapshots or COW copies
 * of the extents that exist in the file, and COWs the file as required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk.
 */
static noinline int run_delalloc_nocow(struct btrfs_inode *inode,
				       struct folio *locked_folio,
				       const u64 start, const u64 end)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct btrfs_path *path;
	u64 cow_start = (u64)-1;
	u64 cur_offset = start;
	int ret;
	bool check_prev = true;
	u64 ino = btrfs_ino(inode);
	struct can_nocow_file_extent_args nocow_args = { 0 };

	/*
	 * Normally on a zoned device we're only doing COW writes, but
	 * relocation on a zoned filesystem serializes I/O so that we're only
	 * writing sequentially and can end up here as well.
	 */
	ASSERT(!btrfs_is_zoned(fs_info) || btrfs_is_data_reloc_root(root));

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto error;
	}

	nocow_args.end = end;
	nocow_args.writeback_path = true;

	while (cur_offset <= end) {
		struct btrfs_block_group *nocow_bg = NULL;
		struct btrfs_ordered_extent *ordered;
		struct btrfs_key found_key;
		struct btrfs_file_extent_item *fi;
		struct extent_buffer *leaf;
		struct extent_state *cached_state = NULL;
		u64 extent_end;
		u64 nocow_end;
		int extent_type;
		bool is_prealloc;

		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;

		/*
		 * If there is no extent for our range when doing the initial
		 * search, then go back to the previous slot as it will be the
		 * one containing the search offset
		 */
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = false;
next_slot:
		/* Go to next leaf if we have exhausted the current one */
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* Didn't find anything for our INO */
		if (found_key.objectid > ino)
			break;
		/*
		 * Keep searching until we find an EXTENT_ITEM or there are no
		 * more extents for this inode
		 */
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}

		/* Found key is not EXTENT_DATA_KEY or starts after req range */
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		/*
		 * If the found extent starts after requested offset, then
		 * adjust extent_end to be right before this extent begins
		 */
		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto must_cow;
		}

		/*
		 * Found extent which begins before our range and potentially
		 * intersect it
		 */
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);
		/* If this is triggered then we have a memory corruption. */
		ASSERT(extent_type < BTRFS_NR_FILE_EXTENT_TYPES);
		if (WARN_ON(extent_type >= BTRFS_NR_FILE_EXTENT_TYPES)) {
			ret = -EUCLEAN;
			goto error;
		}
		extent_end = btrfs_file_extent_end(path);

		/*
		 * If the extent we got ends before our current offset, skip to
		 * the next extent.
		 */
		if (extent_end <= cur_offset) {
			path->slots[0]++;
			goto next_slot;
		}

		nocow_args.start = cur_offset;
		ret = can_nocow_file_extent(path, &found_key, inode, &nocow_args);
		if (ret < 0)
			goto error;
		if (ret == 0)
			goto must_cow;

		ret = 0;
		nocow_bg = btrfs_inc_nocow_writers(fs_info,
				nocow_args.file_extent.disk_bytenr +
				nocow_args.file_extent.offset);
		if (!nocow_bg) {
must_cow:
			/*
			 * If we can't perform NOCOW writeback for the range,
			 * then record the beginning of the range that needs to
			 * be COWed. It will be written out before the next
			 * NOCOW range if we find one, or when exiting this
			 * loop.
			 */
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			if (!path->nodes[0])
				continue;
			path->slots[0]++;
			goto next_slot;
		}

		/*
		 * COW range from cow_start to found_key.offset - 1. As the key
		 * will contain the beginning of the first extent that can be
		 * NOCOW, following one which needs to be COW'ed
		 */
		if (cow_start != (u64)-1) {
			ret = fallback_to_cow(inode, locked_folio, cow_start,
					      found_key.offset - 1);
			cow_start = (u64)-1;
			if (ret) {
				btrfs_dec_nocow_writers(nocow_bg);
				goto error;
			}
		}

		nocow_end = cur_offset + nocow_args.file_extent.num_bytes - 1;
		lock_extent(&inode->io_tree, cur_offset, nocow_end, &cached_state);

		is_prealloc = extent_type == BTRFS_FILE_EXTENT_PREALLOC;
		if (is_prealloc) {
			struct extent_map *em;

			em = btrfs_create_io_em(inode, cur_offset,
						&nocow_args.file_extent,
						BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				unlock_extent(&inode->io_tree, cur_offset,
					      nocow_end, &cached_state);
				btrfs_dec_nocow_writers(nocow_bg);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		ordered = btrfs_alloc_ordered_extent(inode, cur_offset,
				&nocow_args.file_extent,
				is_prealloc
				? (1 << BTRFS_ORDERED_PREALLOC)
				: (1 << BTRFS_ORDERED_NOCOW));
		btrfs_dec_nocow_writers(nocow_bg);
		if (IS_ERR(ordered)) {
			if (is_prealloc)
				btrfs_drop_extent_map_range(inode, cur_offset,
							    nocow_end, false);
			unlock_extent(&inode->io_tree, cur_offset,
				      nocow_end, &cached_state);
			ret = PTR_ERR(ordered);
			goto error;
		}

		if (btrfs_is_data_reloc_root(root))
			/*
			 * Error handled later, as we must prevent
			 * extent_clear_unlock_delalloc() in error handler
			 * from freeing metadata of created ordered extent.
			 */
			ret = btrfs_reloc_clone_csums(ordered);
		btrfs_put_ordered_extent(ordered);

		extent_clear_unlock_delalloc(inode, cur_offset, nocow_end,
					     locked_folio, &cached_state,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_ORDERED);

		cur_offset = extent_end;

		/*
		 * btrfs_reloc_clone_csums() error, now we're OK to call error
		 * handler, as metadata for created ordered extent will only
		 * be freed by btrfs_finish_ordered_io().
		 */
		if (ret)
			goto error;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1)
		cow_start = cur_offset;

	if (cow_start != (u64)-1) {
		cur_offset = end;
		ret = fallback_to_cow(inode, locked_folio, cow_start, end);
		cow_start = (u64)-1;
		if (ret)
			goto error;
	}

	btrfs_free_path(path);
	return 0;

error:
	/*
	 * If an error happened while a COW region is outstanding, cur_offset
	 * needs to be reset to cow_start to ensure the COW region is unlocked
	 * as well.
	 */
	if (cow_start != (u64)-1)
		cur_offset = cow_start;

	/*
	 * We need to lock the extent here because we're clearing DELALLOC and
	 * we're not locked at this point.
	 */
	if (cur_offset < end) {
		struct extent_state *cached = NULL;

		lock_extent(&inode->io_tree, cur_offset, end, &cached);
		extent_clear_unlock_delalloc(inode, cur_offset, end,
					     locked_folio, &cached,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_START_WRITEBACK |
					     PAGE_END_WRITEBACK);
		btrfs_qgroup_free_data(inode, NULL, cur_offset, end - cur_offset + 1, NULL);
	}
	btrfs_free_path(path);
	return ret;
}

static bool should_nocow(struct btrfs_inode *inode, u64 start, u64 end)
{
	if (inode->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC)) {
		if (inode->defrag_bytes &&
		    test_range_bit_exists(&inode->io_tree, start, end, EXTENT_DEFRAG))
			return false;
		return true;
	}
	return false;
}
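
/*
 * Note: NODATACOW/PREALLOC only make NOCOW a *candidate* path; ranges marked
 * with EXTENT_DEFRAG are deliberately forced back through COW so that
 * defragmented data is actually relocated, and run_delalloc_nocow() may still
 * fall back to COW per extent (snapshots, existing csums, compression, ...).
 */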

/*
 * Function to process delayed allocation (create CoW) for ranges which are
 * being touched for the first time.
 */
int btrfs_run_delalloc_range(struct btrfs_inode *inode, struct folio *locked_folio,
			     u64 start, u64 end, struct writeback_control *wbc)
{
	const bool zoned = btrfs_is_zoned(inode->root->fs_info);
	int ret;

	/*
	 * The range must cover part of the @locked_folio, or a return of 1
	 * can confuse the caller.
	 */
	ASSERT(!(end <= folio_pos(locked_folio) ||
		 start >= folio_pos(locked_folio) + folio_size(locked_folio)));

	if (should_nocow(inode, start, end)) {
		ret = run_delalloc_nocow(inode, locked_folio, start, end);
		goto out;
	}

	if (btrfs_inode_can_compress(inode) &&
	    inode_need_compress(inode, start, end) &&
	    run_delalloc_compressed(inode, locked_folio, start, end, wbc))
		return 1;

	if (zoned)
		ret = run_delalloc_cow(inode, locked_folio, start, end, wbc,
				       true);
	else
		ret = cow_file_range(inode, locked_folio, start, end, NULL,
				     false, false);

out:
	if (ret < 0)
		btrfs_cleanup_ordered_extents(inode, locked_folio, start,
					      end - start + 1);
	return ret;
}
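
/*
 * Summary of the dispatch above (a reading aid, not new behavior):
 *
 *	should_nocow()		-> run_delalloc_nocow()	   (NODATACOW/prealloc)
 *	compressible range	-> run_delalloc_compressed(), returns 1 (async)
 *	zoned filesystem	-> run_delalloc_cow()	   (sequential COW)
 *	everything else		-> cow_file_range()
 *
 * Only the error path cleans up ordered extents here; the success paths hand
 * the range over to ordered extent completion.
 */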

void btrfs_split_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *orig, u64 split)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 size;

	lockdep_assert_held(&inode->io_tree.lock);

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > fs_info->max_extent_size) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_delalloc_extent, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(fs_info, new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(fs_info, new_size);
		if (count_max_extents(fs_info, size) >= num_extents)
			return;
	}

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, 1);
	spin_unlock(&inode->lock);
}
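
/*
 * Illustration of the accounting above (example numbers assumed, with
 * max_extent_size == 128M): an extent at or below 128M always accounted for
 * exactly 1 outstanding extent, and splitting it creates 2 pieces, so 1 is
 * added unconditionally. For a 128M + 4K extent split at the 4K boundary the
 * pieces account for 1 + 1 extents while the original already accounted for
 * 2 (count_max_extents() rounds up), so the early return above skips the
 * increment.
 */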

/*
 * Handle merged delayed allocation extents so we can keep track of new extents
 * that are just merged onto old extents, such as when we are doing sequential
 * writes, so we can properly account for the metadata space we'll need.
 */
void btrfs_merge_delalloc_extent(struct btrfs_inode *inode, struct extent_state *new,
				 struct extent_state *other)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 new_size, old_size;
	u32 num_extents;

	lockdep_assert_held(&inode->io_tree.lock);

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= fs_info->max_extent_size) {
		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -1);
		spin_unlock(&inode->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent. If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop. Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return. But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(fs_info, old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(fs_info, old_size);
	if (count_max_extents(fs_info, new_size) >= num_extents)
		return;

	spin_lock(&inode->lock);
	btrfs_mod_outstanding_extents(inode, -1);
	spin_unlock(&inode->lock);
}
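
/*
 * Numeric version of the comment above (example numbers assumed, with
 * max_extent_size == 128M): merging [4K][128M] gives sides of 1 + 1 == 2
 * accounted extents and the merged 128M + 4K range also needs 2, so the
 * count is kept. Merging [128M + 4K][128M + 4K] gives 2 + 2 == 4 accounted
 * extents while the merged 256M + 8K range only needs 3, so one reservation
 * is dropped here.
 */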

static void btrfs_add_delalloc_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&root->delalloc_lock);
	ASSERT(list_empty(&inode->delalloc_inodes));
	list_add_tail(&inode->delalloc_inodes, &root->delalloc_inodes);
	root->nr_delalloc_inodes++;
	if (root->nr_delalloc_inodes == 1) {
		spin_lock(&fs_info->delalloc_root_lock);
		ASSERT(list_empty(&root->delalloc_root));
		list_add_tail(&root->delalloc_root, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&root->delalloc_lock);
}

void btrfs_del_delalloc_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;

	lockdep_assert_held(&root->delalloc_lock);

	/*
	 * We may be called after the inode was already deleted from the list,
	 * namely in the transaction abort path btrfs_destroy_delalloc_inodes(),
	 * and then later through btrfs_clear_delalloc_extent() while the inode
	 * still has ->delalloc_bytes > 0.
	 */
	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			ASSERT(list_empty(&root->delalloc_inodes));
			spin_lock(&fs_info->delalloc_root_lock);
			ASSERT(!list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
}

/*
 * Properly track delayed allocation bytes in the inode and maintain the list
 * of inodes that have pending delalloc work to be done.
 */
void btrfs_set_delalloc_extent(struct btrfs_inode *inode, struct extent_state *state,
			       u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	lockdep_assert_held(&inode->io_tree.lock);

	if ((bits & EXTENT_DEFRAG) && !(bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		u64 len = state->end + 1 - state->start;
		u64 prev_delalloc_bytes;
		u32 num_extents = count_max_extents(fs_info, len);

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, num_extents);
		spin_unlock(&inode->lock);

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		percpu_counter_add_batch(&fs_info->delalloc_bytes, len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		prev_delalloc_bytes = inode->delalloc_bytes;
		inode->delalloc_bytes += len;
		if (bits & EXTENT_DEFRAG)
			inode->defrag_bytes += len;
		spin_unlock(&inode->lock);

		/*
		 * We don't need to be under the protection of the inode's lock,
		 * because we are called while holding the inode's io_tree lock
		 * and are therefore protected against concurrent calls of this
		 * function and btrfs_clear_delalloc_extent().
		 */
		if (!btrfs_is_free_space_inode(inode) && prev_delalloc_bytes == 0)
			btrfs_add_delalloc_inode(inode);
	}

	if (!(state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		inode->new_delalloc_bytes += state->end + 1 - state->start;
		spin_unlock(&inode->lock);
	}
}

/*
 * Once a range is no longer delalloc this function ensures that proper
 * accounting happens.
 */
void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
				 struct extent_state *state, u32 bits)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(fs_info, len);

	lockdep_assert_held(&inode->io_tree.lock);

	if ((state->state & EXTENT_DEFRAG) && (bits & EXTENT_DEFRAG)) {
		spin_lock(&inode->lock);
		inode->defrag_bytes -= len;
		spin_unlock(&inode->lock);
	}

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		u64 new_delalloc_bytes;

		spin_lock(&inode->lock);
		btrfs_mod_outstanding_extents(inode, -num_extents);
		spin_unlock(&inode->lock);

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (bits & EXTENT_CLEAR_META_RESV &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len, true);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (!btrfs_is_data_reloc_root(root) &&
		    !btrfs_is_free_space_inode(inode) &&
		    !(state->state & EXTENT_NORESERVE) &&
		    (bits & EXTENT_CLEAR_DATA_RESV))
			btrfs_free_reserved_data_space_noquota(fs_info, len);

		percpu_counter_add_batch(&fs_info->delalloc_bytes, -len,
					 fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		new_delalloc_bytes = inode->delalloc_bytes;
		spin_unlock(&inode->lock);

		/*
		 * We don't need to be under the protection of the inode's lock,
		 * because we are called while holding the inode's io_tree lock
		 * and are therefore protected against concurrent calls of this
		 * function and btrfs_set_delalloc_extent().
		 */
		if (!btrfs_is_free_space_inode(inode) && new_delalloc_bytes == 0) {
			spin_lock(&root->delalloc_lock);
			btrfs_del_delalloc_inode(inode);
			spin_unlock(&root->delalloc_lock);
		}
	}

	if ((state->state & EXTENT_DELALLOC_NEW) &&
	    (bits & EXTENT_DELALLOC_NEW)) {
		spin_lock(&inode->lock);
		ASSERT(inode->new_delalloc_bytes >= len);
		inode->new_delalloc_bytes -= len;
		if (bits & EXTENT_ADD_INODE_BYTES)
			inode_add_bytes(&inode->vfs_inode, len);
		spin_unlock(&inode->lock);
	}
}
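
/*
 * btrfs_set_delalloc_extent() and the function above are intentionally
 * symmetric: whatever the set hook added (outstanding extents,
 * delalloc_bytes, defrag_bytes, the per-root delalloc list membership) is
 * undone here once the DELALLOC bit is cleared, and new_delalloc_bytes is
 * folded into the inode's byte count via EXTENT_ADD_INODE_BYTES at ordered
 * extent completion.
 */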

/*
 * given a list of ordered sums record them in the inode. This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct list_head *list)
{
	struct btrfs_ordered_sum *sum;
	struct btrfs_root *csum_root = NULL;
	int ret;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = true;
		if (!csum_root)
			csum_root = btrfs_csum_root(trans->fs_info,
						    sum->logical);
		ret = btrfs_csum_file_blocks(trans, csum_root, sum);
		trans->adding_csums = false;
		if (ret)
			return ret;
	}
	return 0;
}

static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
					 const u64 start,
					 const u64 len,
					 struct extent_state **cached_state)
{
	u64 search_start = start;
	const u64 end = start + len - 1;

	while (search_start < end) {
		const u64 search_len = end - search_start + 1;
		struct extent_map *em;
		u64 em_len;
		int ret = 0;

		em = btrfs_get_extent(inode, NULL, search_start, search_len);
		if (IS_ERR(em))
			return PTR_ERR(em);

		if (em->disk_bytenr != EXTENT_MAP_HOLE)
			goto next;

		em_len = em->len;
		if (em->start < search_start)
			em_len -= search_start - em->start;
		if (em_len > search_len)
			em_len = search_len;

		ret = set_extent_bit(&inode->io_tree, search_start,
				     search_start + em_len - 1,
				     EXTENT_DELALLOC_NEW, cached_state);
next:
		search_start = extent_map_end(em);
		free_extent_map(em);
		if (ret)
			return ret;
	}
	return 0;
}
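
/*
 * E.g. (illustrative): for a buffered write into [0, 128K) of a file whose
 * layout is [0, 64K) allocated and [64K, 128K) a hole, the loop above walks
 * two extent maps and tags only [64K, 128K) with EXTENT_DELALLOC_NEW, since
 * only writes into holes (or past EOF) increase the inode's byte count.
 */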

int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
			      unsigned int extra_bits,
			      struct extent_state **cached_state)
{
	WARN_ON(PAGE_ALIGNED(end));

	if (start >= i_size_read(&inode->vfs_inode) &&
	    !(inode->flags & BTRFS_INODE_PREALLOC)) {
		/*
		 * There can't be any extents following eof in this case so just
		 * set the delalloc new bit for the range directly.
		 */
		extra_bits |= EXTENT_DELALLOC_NEW;
	} else {
		int ret;

		ret = btrfs_find_new_delalloc_bytes(inode, start,
						    end + 1 - start,
						    cached_state);
		if (ret)
			return ret;
	}

	return set_extent_bit(&inode->io_tree, start, end,
			      EXTENT_DELALLOC | extra_bits, cached_state);
}
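
/*
 * Typical caller pattern (sketch; see btrfs_page_mkwrite() and the fixup
 * worker below for real users): reserve data and metadata space, lock the
 * range in the io_tree, then call btrfs_set_extent_delalloc() to arm the
 * range for writeback, releasing the reservation on error.
 */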

/* See btrfs_writepage_cow_fixup() for details on why this is required. */
struct btrfs_writepage_fixup {
	struct folio *folio;
	struct btrfs_inode *inode;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup =
		container_of(work, struct btrfs_writepage_fixup, work);
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	struct folio *folio = fixup->folio;
	struct btrfs_inode *inode = fixup->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 page_start = folio_pos(folio);
	u64 page_end = folio_pos(folio) + folio_size(folio) - 1;
	int ret = 0;
	bool free_delalloc_space = true;

	/*
	 * This is similar to page_mkwrite, we need to reserve the space before
	 * we take the folio lock.
	 */
	ret = btrfs_delalloc_reserve_space(inode, &data_reserved, page_start,
					   folio_size(folio));
again:
	folio_lock(folio);

	/*
	 * Before we queued this fixup, we took a reference on the folio.
	 * folio->mapping may go NULL, but it shouldn't be moved to a different
	 * address space.
	 */
	if (!folio->mapping || !folio_test_dirty(folio) ||
	    !folio_test_checked(folio)) {
		/*
		 * Unfortunately this is a little tricky, either
		 *
		 * 1) We got here and our folio had already been dealt with and
		 *    we reserved our space, thus ret == 0, so we need to just
		 *    drop our space reservation and bail. This can happen the
		 *    first time we come into the fixup worker, or could happen
		 *    while waiting for the ordered extent.
		 * 2) Our folio was already dealt with, but we happened to get an
		 *    ENOSPC above from the btrfs_delalloc_reserve_space. In
		 *    this case we obviously don't have anything to release, but
		 *    because the folio was already dealt with we don't want to
		 *    mark the folio with an error, so make sure we're resetting
		 *    ret to 0. This is why we have this check _before_ the ret
		 *    check, because we do not want to have a surprise ENOSPC
		 *    when the folio was already properly dealt with.
		 */
		if (!ret) {
			btrfs_delalloc_release_extents(inode, folio_size(folio));
			btrfs_delalloc_release_space(inode, data_reserved,
						     page_start, folio_size(folio),
						     true);
		}
		ret = 0;
		goto out_page;
	}

	/*
	 * We can't mess with the folio state unless it is locked, so now that
	 * it is locked bail if we failed to make our space reservation.
	 */
	if (ret)
		goto out_page;

	lock_extent(&inode->io_tree, page_start, page_end, &cached_state);

	/* already ordered? We're done */
	if (folio_test_ordered(folio))
		goto out_reserved;

	ordered = btrfs_lookup_ordered_range(inode, page_start, PAGE_SIZE);
	if (ordered) {
		unlock_extent(&inode->io_tree, page_start, page_end,
			      &cached_state);
		folio_unlock(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_set_extent_delalloc(inode, page_start, page_end, 0,
					&cached_state);
	if (ret)
		goto out_reserved;

	/*
	 * Everything went as planned, we're now the owner of a dirty page with
	 * delayed allocation bits set and space reserved for our COW
	 * destination.
	 *
	 * The page was dirty when we started, nothing should have cleaned it.
	 */
	BUG_ON(!folio_test_dirty(folio));
	free_delalloc_space = false;
out_reserved:
	btrfs_delalloc_release_extents(inode, PAGE_SIZE);
	if (free_delalloc_space)
		btrfs_delalloc_release_space(inode, data_reserved, page_start,
					     PAGE_SIZE, true);
	unlock_extent(&inode->io_tree, page_start, page_end, &cached_state);
out_page:
	if (ret) {
		/*
		 * We hit ENOSPC or other errors. Update the mapping and page
		 * to reflect the errors and clean the page.
		 */
		mapping_set_error(folio->mapping, ret);
		btrfs_mark_ordered_io_finished(inode, folio, page_start,
					       folio_size(folio), !ret);
		folio_clear_dirty_for_io(folio);
	}
	btrfs_folio_clear_checked(fs_info, folio, page_start, PAGE_SIZE);
	folio_unlock(folio);
	folio_put(folio);
	kfree(fixup);
	extent_changeset_free(data_reserved);
	/*
	 * As a precaution, do a delayed iput in case it would be the last iput
	 * that could need flushing space. Recursing back to fixup worker would
	 * deadlock.
	 */
	btrfs_add_delayed_iput(inode);
}

/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the folio dirty bit without asking the filesystem if it is a
 * good idea. This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO. We kick off an async process
 * to fix it up. The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the folio.
 */
int btrfs_writepage_cow_fixup(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct btrfs_writepage_fixup *fixup;

	/* This folio has ordered extent covering it already */
	if (folio_test_ordered(folio))
		return 0;

	/*
	 * folio_checked is set below when we create a fixup worker for this
	 * folio, don't try to create another one if we're already
	 * folio_test_checked.
	 *
	 * The extent_io writepage code will redirty the folio if we send back
	 * EAGAIN.
	 */
	if (folio_test_checked(folio))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	/*
	 * We are already holding a reference to this inode from
	 * write_cache_pages. We need to hold it because the space reservation
	 * takes place outside of the folio lock, and we can't trust
	 * page->mapping outside of the folio lock.
	 */
	ihold(inode);
	btrfs_folio_set_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
	folio_get(folio);
	btrfs_init_work(&fixup->work, btrfs_writepage_fixup_worker, NULL);
	fixup->folio = folio;
	fixup->inode = BTRFS_I(inode);
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);

	return -EAGAIN;
}
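
/*
 * Lifecycle of a fixup (summary of the two functions above): the writeback
 * path finds a dirty folio without ORDERED set, marks it checked, takes a
 * folio reference and returns -EAGAIN so the folio is redirtied; the worker
 * then re-reserves space, waits out any overlapping ordered extent, re-marks
 * the range delalloc and unlocks, after which a later writeback pass writes
 * the folio normally.
 */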

static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct btrfs_inode *inode, u64 file_pos,
				       struct btrfs_file_extent_item *stack_fi,
				       const bool update_inode_bytes,
				       u64 qgroup_reserved)
{
	struct btrfs_root *root = inode->root;
	const u64 sectorsize = root->fs_info->sectorsize;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	u64 disk_num_bytes = btrfs_stack_file_extent_disk_num_bytes(stack_fi);
	u64 disk_bytenr = btrfs_stack_file_extent_disk_bytenr(stack_fi);
	u64 offset = btrfs_stack_file_extent_offset(stack_fi);
	u64 num_bytes = btrfs_stack_file_extent_num_bytes(stack_fi);
	u64 ram_bytes = btrfs_stack_file_extent_ram_bytes(stack_fi);
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	drop_args.path = path;
	drop_args.start = file_pos;
	drop_args.end = file_pos + num_bytes;
	drop_args.replace_extent = true;
	drop_args.extent_item_size = sizeof(*stack_fi);
	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret)
		goto out;

	if (!drop_args.extent_inserted) {
		ins.objectid = btrfs_ino(inode);
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*stack_fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	btrfs_set_stack_file_extent_generation(stack_fi, trans->transid);
	write_extent_buffer(leaf, stack_fi,
			btrfs_item_ptr_offset(leaf, path->slots[0]),
			sizeof(struct btrfs_file_extent_item));

	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_release_path(path);

	/*
	 * If we dropped an inline extent here, we know the range where it is
	 * was not marked with the EXTENT_DELALLOC_NEW bit, so we update the
	 * number of bytes only for that range containing the inline extent.
	 * The remainder of the range will be processed when clearing the
	 * EXTENT_DELALLOC_BIT bit through the ordered extent completion.
	 */
	if (file_pos == 0 && !IS_ALIGNED(drop_args.bytes_found, sectorsize)) {
		u64 inline_size = round_down(drop_args.bytes_found, sectorsize);

		inline_size = drop_args.bytes_found - inline_size;
		btrfs_update_inode_bytes(inode, sectorsize, inline_size);
		drop_args.bytes_found -= inline_size;
		num_bytes -= sectorsize;
	}

	if (update_inode_bytes)
		btrfs_update_inode_bytes(inode, num_bytes, drop_args.bytes_found);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_inode_set_file_extent_range(inode, file_pos, ram_bytes);
	if (ret)
		goto out;

	ret = btrfs_alloc_reserved_file_extent(trans, root, btrfs_ino(inode),
					       file_pos - offset,
					       qgroup_reserved, &ins);
out:
	btrfs_free_path(path);

	return ret;
}

static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}

static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
					     struct btrfs_ordered_extent *oe)
{
	struct btrfs_file_extent_item stack_fi;
	bool update_inode_bytes;
	u64 num_bytes = oe->num_bytes;
	u64 ram_bytes = oe->ram_bytes;

	memset(&stack_fi, 0, sizeof(stack_fi));
	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_REG);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, oe->disk_bytenr);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi,
						   oe->disk_num_bytes);
	btrfs_set_stack_file_extent_offset(&stack_fi, oe->offset);
	if (test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags))
		num_bytes = oe->truncated_len;
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, num_bytes);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, ram_bytes);
	btrfs_set_stack_file_extent_compression(&stack_fi, oe->compress_type);
	/* Encryption and other encoding is reserved and all 0 */

	/*
	 * For delalloc, when completing an ordered extent we update the inode's
	 * bytes when clearing the range in the inode's io tree, so pass false
	 * as the argument 'update_inode_bytes' to insert_reserved_file_extent(),
	 * except if the ordered extent was truncated.
	 */
	update_inode_bytes = test_bit(BTRFS_ORDERED_DIRECT, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_ENCODED, &oe->flags) ||
			     test_bit(BTRFS_ORDERED_TRUNCATED, &oe->flags);

	return insert_reserved_file_extent(trans, oe->inode,
					   oe->file_offset, &stack_fi,
					   update_inode_bytes, oe->qgroup_rsv);
}
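
/*
 * Note: stack_fi is a CPU-stack copy of the on-disk file extent item; its
 * generation field is filled in later by insert_reserved_file_extent() once
 * a transaction handle (and thus trans->transid) is available.
 */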

/*
 * As ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
{
	struct btrfs_inode *inode = ordered_extent->inode;
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 start, end;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->num_bytes;
	bool freespace_inode;
	bool truncated = false;
	bool clear_reserved_extent = true;
	unsigned int clear_bits = EXTENT_DEFRAG;

	start = ordered_extent->file_offset;
	end = start + ordered_extent->num_bytes - 1;

	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_DIRECT, &ordered_extent->flags) &&
	    !test_bit(BTRFS_ORDERED_ENCODED, &ordered_extent->flags))
		clear_bits |= EXTENT_DELALLOC_NEW;

	freespace_inode = btrfs_is_free_space_inode(inode);
	if (!freespace_inode)
		btrfs_lockdep_acquire(fs_info, btrfs_ordered_extent);

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	if (btrfs_is_zoned(fs_info))
		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	/*
	 * If it's a COW write we need to lock the extent range as we will be
	 * inserting/replacing file extent items and unpinning an extent map.
	 * This must be taken before joining a transaction, as it's a higher
	 * level lock (like the inode's VFS lock), otherwise we can run into an
	 * ABBA deadlock with other tasks (transactions work like a lock,
	 * depending on their current state).
	 */
	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		clear_bits |= EXTENT_LOCKED;
		lock_extent(io_tree, start, end, &cached_state);
	}

	if (freespace_inode)
		trans = btrfs_join_transaction_spacecache(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out;
	}

	trans->block_rsv = &inode->block_rsv;

	ret = btrfs_insert_raid_extent(trans, ordered_extent);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		/* Logic error */
		ASSERT(list_empty(&ordered_extent->list));
		if (!list_empty(&ordered_extent->list)) {
			ret = -EINVAL;
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		btrfs_inode_safe_disk_i_size_write(inode, 0);
		ret = btrfs_update_inode_fallback(trans, inode);
		if (ret) {
			/* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		}
		goto out;
	}

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
		btrfs_zoned_release_data_reloc_bg(fs_info, ordered_extent->disk_bytenr,
						  ordered_extent->disk_num_bytes);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_ordered_extent_file_extent(trans, ordered_extent);
		if (!ret) {
			clear_reserved_extent = false;
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->disk_bytenr,
						     ordered_extent->disk_num_bytes);
		}
	}
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = unpin_extent_cache(inode, ordered_extent->file_offset,
				 ordered_extent->num_bytes, trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	ret = add_pending_csums(trans, &ordered_extent->list);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	/*
	 * If this is a new delalloc range, clear its new delalloc flag to
	 * update the inode's number of bytes. This needs to be done first
	 * before updating the inode item.
	 */
	if ((clear_bits & EXTENT_DELALLOC_NEW) &&
	    !test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags))
		clear_extent_bit(&inode->io_tree, start, end,
				 EXTENT_DELALLOC_NEW | EXTENT_ADD_INODE_BYTES,
				 &cached_state);

	btrfs_inode_safe_disk_i_size_write(inode, 0);
	ret = btrfs_update_inode_fallback(trans, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
out:
	clear_extent_bit(&inode->io_tree, start, end, clear_bits,
			 &cached_state);

	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 unwritten_start = start;

		/*
		 * If we failed to finish this ordered extent for any reason we
		 * need to make sure BTRFS_ORDERED_IOERR is set on the ordered
		 * extent, and mark the inode with the error if it wasn't
		 * already set. Any error during writeback would have already
		 * set the mapping error, so we need to set it if we're the ones
		 * marking this ordered extent as failed.
		 */
		if (ret)
			btrfs_mark_ordered_extent_error(ordered_extent);

		if (truncated)
			unwritten_start += logical_len;
		clear_extent_uptodate(io_tree, unwritten_start, end, NULL);

		/*
		 * Drop extent maps for the part of the extent we didn't write.
		 *
		 * We have an exception here for the free_space_inode, this is
		 * because when we do btrfs_get_extent() on the free space inode
		 * we will search the commit root. If this is a new block group
		 * we won't find anything, and we will trip over the assert in
		 * writepage where we do ASSERT(em->block_start !=
		 * EXTENT_MAP_HOLE).
		 *
		 * Theoretically we could also skip this for any NOCOW extent as
		 * we don't mess with the extent map tree in the NOCOW case, but
		 * for now simply skip this if we are the free space inode.
		 */
		if (!btrfs_is_free_space_inode(inode))
			btrfs_drop_extent_map_range(inode, unwritten_start,
						    end, false);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator. We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 *
		 * If we made it past insert_reserved_file_extent before we
		 * errored out then we don't need to do this as the accounting
		 * has already been done.
		 */
		if ((ret || !logical_len) &&
		    clear_reserved_extent &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
			/*
			 * Discard the range before returning it back to the
			 * free space pool
			 */
			if (ret && btrfs_test_opt(fs_info, DISCARD_SYNC))
				btrfs_discard_extent(fs_info,
						ordered_extent->disk_bytenr,
						ordered_extent->disk_num_bytes,
						NULL);
			btrfs_free_reserved_extent(fs_info,
					ordered_extent->disk_bytenr,
					ordered_extent->disk_num_bytes, 1);
			/*
			 * Actually free the qgroup rsv which was released when
			 * the ordered extent was created.
			 */
			btrfs_qgroup_free_refroot(fs_info, btrfs_root_id(inode->root),
						  ordered_extent->qgroup_rsv,
						  BTRFS_QGROUP_RSV_DATA);
		}
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}

int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
{
	if (btrfs_is_zoned(ordered->inode->root->fs_info) &&
	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags) &&
	    list_empty(&ordered->bioc_list))
		btrfs_finish_ordered_zoned(ordered);
	return btrfs_finish_one_ordered(ordered);
}

/*
 * Verify the checksum for a single sector without any extra actions that
 * depend on the type of I/O.
 */
int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
			    u32 pgoff, u8 *csum, const u8 * const csum_expected)
{
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	char *kaddr;

	ASSERT(pgoff + fs_info->sectorsize <= PAGE_SIZE);

	shash->tfm = fs_info->csum_shash;

	kaddr = kmap_local_page(page) + pgoff;
	crypto_shash_digest(shash, kaddr, fs_info->sectorsize, csum);
	kunmap_local(kaddr);

	if (memcmp(csum, csum_expected, fs_info->csum_size))
		return -EIO;
	return 0;
}
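
/*
 * Usage sketch (illustrative, mirroring btrfs_data_csum_ok() below): compute
 * the csum of one sector into a local buffer and compare it against the
 * expected csum taken from the bio's csum array:
 *
 *	u8 csum[BTRFS_CSUM_SIZE];
 *
 *	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset,
 *				    csum, csum_expected))
 *		;	// mismatch: zero the sector and report corruption
 */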

/*
 * Verify the checksum of a single data sector.
 *
 * @bbio:	btrfs_io_bio which contains the csum
 * @dev:	device the sector is on
 * @bio_offset:	offset to the beginning of the bio (in bytes)
 * @bv:		bio_vec to check
 *
 * Check if the checksum on a data block is valid. When a checksum mismatch is
 * detected, report the error and fill the corrupted range with zero.
 *
 * Return %true if the sector is ok or had no checksum to start with, else %false.
 */
bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
			u32 bio_offset, struct bio_vec *bv)
{
	struct btrfs_inode *inode = bbio->inode;
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	u64 file_offset = bbio->file_offset + bio_offset;
	u64 end = file_offset + bv->bv_len - 1;
	u8 *csum_expected;
	u8 csum[BTRFS_CSUM_SIZE];

	ASSERT(bv->bv_len == fs_info->sectorsize);

	if (!bbio->csum)
		return true;

	if (btrfs_is_data_reloc_root(inode->root) &&
	    test_range_bit(&inode->io_tree, file_offset, end, EXTENT_NODATASUM,
			   NULL)) {
		/* Skip the range without csum for data reloc inode */
		clear_extent_bits(&inode->io_tree, file_offset, end,
				  EXTENT_NODATASUM);
		return true;
	}

	csum_expected = bbio->csum + (bio_offset >> fs_info->sectorsize_bits) *
				fs_info->csum_size;
	if (btrfs_check_sector_csum(fs_info, bv->bv_page, bv->bv_offset, csum,
				    csum_expected))
		goto zeroit;
	return true;

zeroit:
	btrfs_print_data_csum_error(inode, file_offset, csum, csum_expected,
				    bbio->mirror_num);
	if (dev)
		btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS);
	memzero_bvec(bv);
	return false;
}

/*
 * Perform a delayed iput on @inode.
 *
 * @inode: The inode we want to perform iput on
 *
 * This function uses the generic vfs_inode::i_count to track whether we should
 * just decrement it (in case it's > 1) or if this is the last iput then link
 * the inode to the delayed iput machinery. Delayed iputs are processed at
 * transaction commit time/superblock commit/cleaner kthread.
 */
void btrfs_add_delayed_iput(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	unsigned long flags;

	if (atomic_add_unless(&inode->vfs_inode.i_count, -1, 1))
		return;

	atomic_inc(&fs_info->nr_delayed_iputs);
	/*
	 * Need to be irq safe here because we can be called from either an irq
	 * context (see bio.c and btrfs_put_ordered_extent()) or a non-irq
	 * context.
	 */
	spin_lock_irqsave(&fs_info->delayed_iput_lock, flags);
	ASSERT(list_empty(&inode->delayed_iput));
	list_add_tail(&inode->delayed_iput, &fs_info->delayed_iputs);
	spin_unlock_irqrestore(&fs_info->delayed_iput_lock, flags);
	if (!test_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags))
		wake_up_process(fs_info->cleaner_kthread);
}
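
/*
 * Why delayed: a final iput() can evict the inode, which may need to flush
 * space and take locks that are unsafe in the calling context, e.g. irq
 * context in bio completion, or the fixup worker above, where a synchronous
 * iput() could recurse into the very machinery that is running it.
 */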

static void run_delayed_iput_locked(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *inode)
{
	list_del_init(&inode->delayed_iput);
	spin_unlock_irq(&fs_info->delayed_iput_lock);
	iput(&inode->vfs_inode);
	if (atomic_dec_and_test(&fs_info->nr_delayed_iputs))
		wake_up(&fs_info->delayed_iputs_wait);
	spin_lock_irq(&fs_info->delayed_iput_lock);
}

static void btrfs_run_delayed_iput(struct btrfs_fs_info *fs_info,
				   struct btrfs_inode *inode)
{
	if (!list_empty(&inode->delayed_iput)) {
		spin_lock_irq(&fs_info->delayed_iput_lock);
		if (!list_empty(&inode->delayed_iput))
			run_delayed_iput_locked(fs_info, inode);
		spin_unlock_irq(&fs_info->delayed_iput_lock);
	}
}

void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	/*
	 * btrfs_put_ordered_extent() can run in irq context (see bio.c), which
	 * calls btrfs_add_delayed_iput() and that needs to lock
	 * fs_info->delayed_iput_lock. So we need to disable irqs here to
	 * prevent a deadlock.
	 */
	spin_lock_irq(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
					 struct btrfs_inode, delayed_iput);
		run_delayed_iput_locked(fs_info, inode);
		if (need_resched()) {
			spin_unlock_irq(&fs_info->delayed_iput_lock);
			cond_resched();
			spin_lock_irq(&fs_info->delayed_iput_lock);
		}
	}
	spin_unlock_irq(&fs_info->delayed_iput_lock);
}

/*
 * Wait for flushing all delayed iputs
 *
 * @fs_info:  the filesystem
 *
 * This will wait on any delayed iputs that are currently running with KILLABLE
 * set. Once they are all done running we will return, unless we are killed in
 * which case we return EINTR. This helps in user operations like fallocate etc
 * that might get blocked on the iputs.
 *
 * Return EINTR if we were killed, 0 if nothing's pending
 */
int btrfs_wait_on_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	int ret = wait_event_killable(fs_info->delayed_iputs_wait,
			atomic_read(&fs_info->nr_delayed_iputs) == 0);
	if (ret)
		return -EINTR;
	return 0;
}

/*
 * This creates an orphan entry for the given inode in case something goes wrong
 * in the middle of an unlink.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_insert_orphan_item(trans, inode->root, btrfs_ino(inode));
	if (ret && ret != -EEXIST) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	return 0;
}

/*
 * We have done the delete so we can go ahead and remove the orphan item for
 * this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	return btrfs_del_orphan_item(trans, inode->root, btrfs_ino(inode));
}

/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0;

	if (test_and_set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &root->state))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing. we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			/*
			 * We found the same inode as before. This means we were
			 * not able to remove its items via eviction triggered
			 * by an iput(). A transaction abort may have happened,
			 * due to -ENOSPC for example, so try to grab the error
			 * that lead to a transaction abort, if any.
			 */
			btrfs_err(fs_info,
				  "Error removing orphan entry, stopping orphan cleanup");
			ret = BTRFS_FS_ERROR(fs_info) ?: -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(last_objectid, root);
		if (IS_ERR(inode)) {
			ret = PTR_ERR(inode);
			inode = NULL;
			if (ret != -ENOENT)
				goto out;
		}

		if (!inode && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			int is_dead_root = 0;

			/*
			 * This is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a root (snapshot/subvolume) deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the orphan item
			 * for a root must not get deleted before the deletion
			 * of the snapshot/subvolume's tree completes.
			 *
			 * btrfs_find_orphan_roots() ran before us, which has
			 * found all deleted roots and loaded them into
			 * fs_info->fs_roots_radix. So here we can find if an
			 * orphan item corresponds to a deleted root by looking
			 * up the root from that radix tree.
			 */

			spin_lock(&fs_info->fs_roots_radix_lock);
			dead_root = radix_tree_lookup(&fs_info->fs_roots_radix,
						      (unsigned long)found_key.objectid);
			if (dead_root && btrfs_root_refs(&dead_root->root_item) == 0)
				is_dead_root = 1;
			spin_unlock(&fs_info->fs_roots_radix_lock);

			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}

		/*
		 * If we have an inode with links, there are a couple of
		 * possibilities:
		 *
		 * 1. We were halfway through creating fsverity metadata for the
		 *    file. In that case, the orphan item represents incomplete
		 *    fsverity metadata which must be cleaned up with
		 *    btrfs_drop_verity_items and deleting the orphan item.
		 *
		 * 2. Old kernels (before v3.12) used to create an
		 *    orphan item for truncate indicating that there were possibly
		 *    extent items past i_size that needed to be deleted. In v3.12,
		 *    truncate was changed to update i_size in sync with the extent
		 *    items, but the (useless) orphan item was still created. Since
		 *    v4.18, we don't create the orphan item for truncate at all.
		 *
		 * So, this item could mean that we need to do a truncate, but
		 * only if this filesystem was last used on a pre-v3.12 kernel
		 * and was not cleanly unmounted. The odds of that are quite
		 * slim, and it's a pain to do the truncate now, so just delete
		 * the orphan item.
		 *
		 * It's also possible that this orphan item was supposed to be
		 * deleted but wasn't. The inode number may have been reused,
		 * but either way, we can delete the orphan item.
		 */
		if (!inode || inode->i_nlink) {
			if (inode) {
				ret = btrfs_drop_verity_items(BTRFS_I(inode));
				iput(inode);
				inode = NULL;
				if (ret)
					goto out;
			}
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		nr_unlink++;

		/* this will do delete_inode and everything for us */
		iput(inode);
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}

/*
 * very simple check to peek ahead in the leaf looking for xattrs. If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs. Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr. We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
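
/*
 * Leaf layout example (illustrative): items for one inode are keyed in the
 * order INODE_ITEM, INODE_REF/EXTREF backrefs, XATTR_ITEMs, EXTENT_DATA, so
 * scanning forward from the inode's slot hits any xattr before the first
 * extent item; seeing a key type greater than XATTR_ITEM_KEY therefore
 * proves no ACL xattr follows in this leaf.
 */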

static int btrfs_init_file_extent_tree(struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;

	if (WARN_ON_ONCE(inode->file_extent_tree))
		return 0;
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;
	if (!S_ISREG(inode->vfs_inode.i_mode))
		return 0;
	if (btrfs_is_free_space_inode(inode))
		return 0;

	inode->file_extent_tree = kmalloc(sizeof(struct extent_io_tree), GFP_KERNEL);
	if (!inode->file_extent_tree)
		return -ENOMEM;

	extent_io_tree_init(fs_info, inode->file_extent_tree, IO_TREE_INODE_FILE_EXTENT);
	/* Lockdep class is set only for the file extent tree. */
	lockdep_set_class(&inode->file_extent_tree->lock, &file_extent_tree_class);

	return 0;
}

static int btrfs_add_inode_to_root(struct btrfs_inode *inode, bool prealloc)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *existing;
	const u64 ino = btrfs_ino(inode);
	int ret;

	if (inode_unhashed(&inode->vfs_inode))
		return 0;

	if (prealloc) {
		ret = xa_reserve(&root->inodes, ino, GFP_NOFS);
		if (ret)
			return ret;
	}

	existing = xa_store(&root->inodes, ino, inode, GFP_ATOMIC);

	if (xa_is_err(existing)) {
		ret = xa_err(existing);
		ASSERT(ret != -EINVAL);
		ASSERT(ret != -ENOMEM);
		return ret;
	} else if (existing) {
		WARN_ON(!(existing->vfs_inode.i_state & (I_WILL_FREE | I_FREEING)));
	}

	return 0;
}

/*
 * Read a locked inode from the btree into the in-memory inode and add it to
 * its root list/tree.
 *
 * On failure clean up the inode.
 */
static int btrfs_read_locked_inode(struct inode *inode, struct btrfs_path *path)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
	if (ret)
		goto out;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	btrfs_get_inode_key(BTRFS_I(inode), &location);

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		/*
		 * ret > 0 can come from btrfs_search_slot called by
		 * btrfs_lookup_inode(), this means the inode was not found.
		 */
		if (ret > 0)
			ret = -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));
	btrfs_inode_set_file_extent_range(BTRFS_I(inode), 0,
			round_up(i_size_read(inode), fs_info->sectorsize));

	inode_set_atime(inode, btrfs_timespec_sec(leaf, &inode_item->atime),
			btrfs_timespec_nsec(leaf, &inode_item->atime));

	inode_set_mtime(inode, btrfs_timespec_sec(leaf, &inode_item->mtime),
			btrfs_timespec_nsec(leaf, &inode_item->mtime));

	inode_set_ctime(inode, btrfs_timespec_sec(leaf, &inode_item->ctime),
			btrfs_timespec_nsec(leaf, &inode_item->ctime));

	BTRFS_I(inode)->i_otime_sec = btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime_nsec = btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode_set_iversion_queried(inode,
				   btrfs_inode_sequence(leaf, inode_item));
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	if (S_ISDIR(inode->i_mode))
		BTRFS_I(inode)->index_cnt = (u64)-1;

	btrfs_inode_split_flags(btrfs_inode_flags(leaf, inode_item),
				&BTRFS_I(inode)->flags, &BTRFS_I(inode)->ro_flags);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from memory
	 * and then re-read we need to do a full sync since we don't have any
	 * idea about which extents were modified before we were evicted from
	 * memory.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in the delayed_nodes xarray.
	 */
	if (BTRFS_I(inode)->last_trans == btrfs_get_fs_generation(fs_info))
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 * touch mydir/foo
	 * ln mydir/foo mydir/bar
	 * sync
	 * unlink mydir/bar
	 * echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 * xfs_io -c fsync mydir/foo
	 * <power failure>
	 * mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	/*
	 * Same logic as for last_unlink_trans. We don't persist the generation
	 * of the last transaction where this inode was used for a reflink
	 * operation, so after eviction and reloading the inode we must be
	 * pessimistic and assume the last transaction that modified the inode.
	 */
	BTRFS_I(inode)->last_reflink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
					   btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  btrfs_root_id(root), ret);
	}

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_sync_inode_flags_to_i_flags(inode);

	ret = btrfs_add_inode_to_root(BTRFS_I(inode), true);
	if (ret)
		goto out;

	return 0;
out:
	iget_failed(inode);
	return ret;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;
	u64 flags;

	btrfs_init_map_token(&token, leaf);

	btrfs_set_token_inode_uid(&token, item, i_uid_read(inode));
	btrfs_set_token_inode_gid(&token, item, i_gid_read(inode));
	btrfs_set_token_inode_size(&token, item, BTRFS_I(inode)->disk_i_size);
	btrfs_set_token_inode_mode(&token, item, inode->i_mode);
	btrfs_set_token_inode_nlink(&token, item, inode->i_nlink);

	btrfs_set_token_timespec_sec(&token, &item->atime,
				     inode_get_atime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->atime,
				      inode_get_atime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->mtime,
				     inode_get_mtime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->mtime,
				      inode_get_mtime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->ctime,
				     inode_get_ctime_sec(inode));
	btrfs_set_token_timespec_nsec(&token, &item->ctime,
				      inode_get_ctime_nsec(inode));

	btrfs_set_token_timespec_sec(&token, &item->otime, BTRFS_I(inode)->i_otime_sec);
	btrfs_set_token_timespec_nsec(&token, &item->otime, BTRFS_I(inode)->i_otime_nsec);

	btrfs_set_token_inode_nbytes(&token, item, inode_get_bytes(inode));
	btrfs_set_token_inode_generation(&token, item,
					 BTRFS_I(inode)->generation);
	btrfs_set_token_inode_sequence(&token, item, inode_peek_iversion(inode));
	btrfs_set_token_inode_transid(&token, item, trans->transid);
	btrfs_set_token_inode_rdev(&token, item, inode->i_rdev);
	flags = btrfs_inode_combine_flags(BTRFS_I(inode)->flags,
					  BTRFS_I(inode)->ro_flags);
	btrfs_set_token_inode_flags(&token, item, flags);
	btrfs_set_token_inode_block_group(&token, item, 0);
}
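
/*
 * Note: the map token above caches the currently mapped extent buffer page,
 * so the long run of setters on the same inode item avoids re-resolving the
 * page and offset for every field access.
 */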
/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
					    struct btrfs_inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_get_inode_key(inode, &key);
	ret = btrfs_lookup_inode(trans, inode->root, path, &key, 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, &inode->vfs_inode);
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * copy everything in the in-memory inode into the btree.
 */
int btrfs_update_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay.
	 */
	if (!btrfs_is_free_space_inode(inode)
	    && !btrfs_is_data_reloc_root(root)
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, inode);
}
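
/*
 * Like btrfs_update_inode(), but falls back to updating the inode item in the
 * btree directly when the delayed inode update fails with ENOSPC, so callers
 * on critical paths still get the inode persisted.
 */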
int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
				struct btrfs_inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, inode);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const struct fscrypt_str *name,
				struct btrfs_rename_ctx *rename_ctx)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct btrfs_dir_item *di;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino, name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto err;
	}
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, ino, dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name->len, name->name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	if (rename_ctx)
		rename_ctx->index = index;

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	/*
	 * If we are in a rename context, we don't need to update anything in the
	 * log. That will be done later during the rename by btrfs_log_new_name().
	 * Besides that, doing it here would only cause extra unnecessary btree
	 * operations on the log tree, increasing latency for applications.
	 */
	if (!rename_ctx) {
		btrfs_del_inode_ref_in_log(trans, root, name, inode, dir_ino);
		btrfs_del_dir_entries_in_log(trans, root, name, dir, index);
	}

	/*
	 * If we have a pending delayed iput we could end up with the final iput
	 * being run in btrfs-cleaner context.  If we have enough of these built
	 * up we can end up burning a lot of time in btrfs-cleaner without any
	 * way to throttle the unlinks.  Since we're currently holding a ref on
	 * the inode we can run the delayed iput here without any issues as the
	 * final iput won't be done until after we drop the ref we're currently
	 * holding.
	 */
	btrfs_run_delayed_iput(fs_info, inode);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name->len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_set_ctime_current(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode(trans, dir);
out:
	return ret;
}
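
/*
 * Unlink a name from a directory and, on success, drop the VFS link count of
 * the victim inode and write back its inode item.
 */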
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const struct fscrypt_str *name)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, dir, inode, name, NULL);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, inode);
	}
	return ret;
}
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct btrfs_inode *dir)
{
	struct btrfs_root *root = dir->root;

	return btrfs_start_transaction_fallback_global_rsv(root,
							   BTRFS_UNLINK_METADATA_UNITS);
}
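
/*
 * A worst-case unlink touches the dir item and dir index, the inode ref, a
 * possible orphan item, and both the directory and victim inode items; the
 * BTRFS_UNLINK_METADATA_UNITS reservation above is sized for that (see its
 * definition for the exact breakdown).
 */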
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto fscrypt_free;
	}

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				false);

	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (ret)
		goto end_trans;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto end_trans;
	}

end_trans:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(BTRFS_I(dir)->root->fs_info);
fscrypt_free:
	fscrypt_free_filename(&fname);
	return ret;
}
static int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			       struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_inode *inode = BTRFS_I(d_inode(dentry));
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 objectid;
	u64 dir_ino = btrfs_ino(dir);
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	if (btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) {
		objectid = btrfs_root_id(inode->root);
	} else if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		objectid = inode->ref_root_id;
	} else {
		WARN_ON(1);
		fscrypt_free_filename(&fname);
		return -EINVAL;
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   &fname.disk_name, -1);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	/*
	 * This is a placeholder inode for a subvolume we didn't have a
	 * reference to at the time of the snapshot creation.  In the meantime
	 * we could have renamed the real subvol link into our snapshot, so
	 * depending on btrfs_del_root_ref to return -ENOENT here is incorrect.
	 * Instead simply lookup the dir_index_item for this entry so we can
	 * remove it.  Otherwise we know we have a ref to the root and we can
	 * call btrfs_del_root_ref, and it _shouldn't_ fail.
	 */
	if (btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) {
		di = btrfs_search_dir_index_item(root, path, dir_ino, &fname.disk_name);
		if (IS_ERR(di)) {
			ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		index = key.offset;
		btrfs_release_path(path);
	} else {
		ret = btrfs_del_root_ref(trans, objectid,
					 btrfs_root_id(root), dir_ino,
					 &index, &fname.disk_name);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
	}

	ret = btrfs_delete_delayed_dir_index(trans, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - fname.disk_name.len * 2);
	inode_inc_iversion(&dir->vfs_inode);
	inode_set_mtime_to_ts(&dir->vfs_inode, inode_set_ctime_current(&dir->vfs_inode));
	ret = btrfs_update_inode_fallback(trans, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return ret;
}
/*
 * Helper to check if the subvolume references other subvolumes or if it's
 * default.
 */
static noinline int may_destroy_subvol(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct fscrypt_str name = FSTR_INIT("default", 7);
	u64 dir_id;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* Make sure this root isn't set as the default subvol */
	dir_id = btrfs_super_root_dir(fs_info->super_copy);
	di = btrfs_lookup_dir_item(NULL, fs_info->tree_root, path,
				   dir_id, &name, 0);
	if (di && !IS_ERR(di)) {
		btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
		if (key.objectid == btrfs_root_id(root)) {
			ret = -EPERM;
			btrfs_err(fs_info,
				  "deleting default subvolume %llu is not allowed",
				  key.objectid);
			goto out;
		}
		btrfs_release_path(path);
	}

	key.objectid = btrfs_root_id(root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	if (ret == 0) {
		/*
		 * Key with offset -1 found, there would have to exist a root
		 * with such id, but this is out of valid range.
		 */
		ret = -EUCLEAN;
		goto out;
	}

	ret = 0;
	if (path->slots[0] > 0) {
		path->slots[0]--;
		btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
		if (key.objectid == btrfs_root_id(root) && key.type == BTRFS_ROOT_REF_KEY)
			ret = -ENOTEMPTY;
	}
out:
	btrfs_free_path(path);
	return ret;
}
/* Delete all dentries for inodes belonging to the root */
static void btrfs_prune_dentries(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_inode *inode;
	u64 min_ino = 0;

	if (!BTRFS_FS_ERROR(fs_info))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	inode = btrfs_find_first_inode(root, min_ino);
	while (inode) {
		if (atomic_read(&inode->vfs_inode.i_count) > 1)
			d_prune_aliases(&inode->vfs_inode);

		min_ino = btrfs_ino(inode) + 1;
		/*
		 * btrfs_drop_inode() will have it removed from the inode
		 * cache when its usage count hits zero.
		 */
		iput(&inode->vfs_inode);
		cond_resched();
		inode = btrfs_find_first_inode(root, min_ino);
	}
}
int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = dir->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *dest = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	struct btrfs_block_rsv block_rsv;
	u64 root_flags;
	u64 qgroup_reserved = 0;
	int ret;

	down_write(&fs_info->subvol_sem);

	/*
	 * Don't allow to delete a subvolume with send in progress. This is
	 * inside the inode lock so the error handling that has to drop the bit
	 * again is not run concurrently.
	 */
	spin_lock(&dest->root_item_lock);
	if (dest->send_in_progress) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu during send",
			   btrfs_root_id(dest));
		ret = -EPERM;
		goto out_up_write;
	}
	if (atomic_read(&dest->nr_swapfiles)) {
		spin_unlock(&dest->root_item_lock);
		btrfs_warn(fs_info,
			   "attempt to delete subvolume %llu with active swapfile",
			   btrfs_root_id(root));
		ret = -EPERM;
		goto out_up_write;
	}
	root_flags = btrfs_root_flags(&dest->root_item);
	btrfs_set_root_flags(&dest->root_item,
			     root_flags | BTRFS_ROOT_SUBVOL_DEAD);
	spin_unlock(&dest->root_item_lock);

	ret = may_destroy_subvol(dest);
	if (ret)
		goto out_undead;

	btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
	/*
	 * One for dir inode,
	 * two for dir entries,
	 * two for root ref/backref.
	 */
	ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
	if (ret)
		goto out_undead;
	qgroup_reserved = block_rsv.qgroup_rsv_reserved;

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_release;
	}
	btrfs_qgroup_convert_reserved_meta(root, qgroup_reserved);
	qgroup_reserved = 0;
	trans->block_rsv = &block_rsv;
	trans->bytes_reserved = block_rsv.size;

	btrfs_record_snapshot_destroy(trans, dir);

	ret = btrfs_unlink_subvol(trans, dir, dentry);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	ret = btrfs_record_root_in_trans(trans, dest);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}

	memset(&dest->root_item.drop_progress, 0,
	       sizeof(dest->root_item.drop_progress));
	btrfs_set_root_drop_level(&dest->root_item, 0);
	btrfs_set_root_refs(&dest->root_item, 0);

	if (!test_and_set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &dest->state)) {
		ret = btrfs_insert_orphan_item(trans,
					       fs_info->tree_root,
					       btrfs_root_id(dest));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	ret = btrfs_uuid_tree_remove(trans, dest->root_item.uuid,
				     BTRFS_UUID_KEY_SUBVOL, btrfs_root_id(dest));
	if (ret && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto out_end_trans;
	}
	if (!btrfs_is_empty_uuid(dest->root_item.received_uuid)) {
		ret = btrfs_uuid_tree_remove(trans,
					     dest->root_item.received_uuid,
					     BTRFS_UUID_KEY_RECEIVED_SUBVOL,
					     btrfs_root_id(dest));
		if (ret && ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out_end_trans;
		}
	}

	free_anon_bdev(dest->anon_dev);
	dest->anon_dev = 0;
out_end_trans:
	trans->block_rsv = NULL;
	trans->bytes_reserved = 0;
	ret = btrfs_end_transaction(trans);
	inode->i_flags |= S_DEAD;
out_release:
	btrfs_block_rsv_release(fs_info, &block_rsv, (u64)-1, NULL);
	if (qgroup_reserved)
		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserved);
out_undead:
	if (ret) {
		spin_lock(&dest->root_item_lock);
		root_flags = btrfs_root_flags(&dest->root_item);
		btrfs_set_root_flags(&dest->root_item,
				     root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
		spin_unlock(&dest->root_item_lock);
	}
out_up_write:
	up_write(&fs_info->subvol_sem);
	if (!ret) {
		d_invalidate(dentry);
		btrfs_prune_dentries(dest);
		ASSERT(dest->send_in_progress == 0);
	}

	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	int ret = 0;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;
	struct fscrypt_name fname;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID) {
		if (unlikely(btrfs_fs_incompat(fs_info, EXTENT_TREE_V2))) {
			btrfs_err(fs_info,
			"extent tree v2 doesn't support snapshot deletion yet");
			return -EOPNOTSUPP;
		}
		return btrfs_delete_subvolume(BTRFS_I(dir), dentry);
	}

	ret = fscrypt_setup_filename(dir, &dentry->d_name, 1, &fname);
	if (ret)
		return ret;

	/* This needs to handle no-key deletions later on */

	trans = __unlink_start_trans(BTRFS_I(dir));
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (unlikely(btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(dir), dentry);
		goto out;
	}

	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	ret = btrfs_unlink_inode(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				 &fname.disk_name);
	if (!ret) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
out_notrans:
	btrfs_btree_balance_dirty(fs_info);
	fscrypt_free_filename(&fname);

	return ret;
}
/*
 * Read, zero a chunk and write a block.
 *
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct btrfs_inode *inode, loff_t from, loff_t len,
			 int front)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct address_space *mapping = inode->vfs_inode.i_mapping;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct extent_changeset *data_reserved = NULL;
	bool only_release_metadata = false;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct folio *folio;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	size_t write_bytes = blocksize;
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if (IS_ALIGNED(offset, blocksize) &&
	    (!len || IS_ALIGNED(len, blocksize)))
		goto out;

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	ret = btrfs_check_data_free_space(inode, &data_reserved, block_start,
					  blocksize, false);
	if (ret < 0) {
		if (btrfs_check_nocow_lock(inode, block_start, &write_bytes, false) > 0) {
			/* For nocow case, no need to reserve data space */
			only_release_metadata = true;
		} else {
			goto out;
		}
	}
	ret = btrfs_delalloc_reserve_metadata(inode, blocksize, blocksize, false);
	if (ret < 0) {
		if (!only_release_metadata)
			btrfs_free_reserved_data_space(inode, data_reserved,
						       block_start, blocksize);
		goto out;
	}
again:
	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT, mask);
	if (IS_ERR(folio)) {
		btrfs_delalloc_release_space(inode, data_reserved, block_start,
					     blocksize, true);
		btrfs_delalloc_release_extents(inode, blocksize);
		ret = -ENOMEM;
		goto out;
	}

	if (!folio_test_uptodate(folio)) {
		ret = btrfs_read_folio(NULL, folio);
		folio_lock(folio);
		if (folio->mapping != mapping) {
			folio_unlock(folio);
			folio_put(folio);
			goto again;
		}
		if (!folio_test_uptodate(folio)) {
			ret = -EIO;
			goto out_unlock;
		}
	}

	/*
	 * We unlock the page after the io is completed and then re-lock it
	 * above.  release_folio() could have come in between that and cleared
	 * folio private, but left the page in the mapping.  Set the page mapped
	 * here to make sure it's properly set for the subpage stuff.
	 */
	ret = set_folio_extent_mapped(folio);
	if (ret < 0)
		goto out_unlock;

	folio_wait_writeback(folio);

	lock_extent(io_tree, block_start, block_end, &cached_state);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		folio_unlock(folio);
		folio_put(folio);
		btrfs_start_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&inode->io_tree, block_start, block_end,
			 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			 &cached_state);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
					&cached_state);
	if (ret) {
		unlock_extent(io_tree, block_start, block_end, &cached_state);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		if (front)
			folio_zero_range(folio, block_start - folio_pos(folio),
					 offset);
		else
			folio_zero_range(folio,
					 (block_start - folio_pos(folio)) + offset,
					 len);
	}
	btrfs_folio_clear_checked(fs_info, folio, block_start,
				  block_end + 1 - block_start);
	btrfs_folio_set_dirty(fs_info, folio, block_start,
			      block_end + 1 - block_start);
	unlock_extent(io_tree, block_start, block_end, &cached_state);

	if (only_release_metadata)
		set_extent_bit(&inode->io_tree, block_start, block_end,
			       EXTENT_NORESERVE, NULL);

out_unlock:
	if (ret) {
		if (only_release_metadata)
			btrfs_delalloc_release_metadata(inode, blocksize, true);
		else
			btrfs_delalloc_release_space(inode, data_reserved,
						     block_start, blocksize, true);
	}
	btrfs_delalloc_release_extents(inode, blocksize);
	folio_unlock(folio);
	folio_put(folio);
out:
	if (only_release_metadata)
		btrfs_check_nocow_unlock(inode);
	extent_changeset_free(data_reserved);
	return ret;
}
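
/*
 * Example: with a 4K sector size, btrfs_truncate_block(inode, 6000, 0, 0)
 * reads the block covering bytes 4096-8191, zeroes bytes 6000-8191 within it
 * (len == 0 means "to the end of the block") and marks the block dirty as
 * delalloc, so a later writeback COWs it with the zeroed tail.
 */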
static int maybe_insert_hole(struct btrfs_inode *inode, u64 offset, u64 len)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_drop_extents_args drop_args = { 0 };
	int ret;

	/*
	 * If NO_HOLES is enabled, we don't need to do anything.
	 * Later, up in the call chain, either btrfs_set_inode_last_sub_trans()
	 * or btrfs_update_inode() will be called, which guarantee that the next
	 * fsync will know this inode was changed and needs to be logged.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES))
		return 0;

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	drop_args.start = offset;
	drop_args.end = offset + len;
	drop_args.drop_cache = true;

	ret = btrfs_drop_extents(trans, root, inode, &drop_args);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_hole_extent(trans, root, btrfs_ino(inode), offset, len);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
	} else {
		btrfs_update_inode_bytes(inode, 0, drop_args.bytes_found);
		btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return a EXTENT_MAP_HOLE for
 * the range between oldsize and size
 */
int btrfs_cont_expand(struct btrfs_inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int ret = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	ret = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (ret)
		return ret;

	if (size <= hole_start)
		return 0;

	btrfs_lock_and_flush_ordered_range(inode, hole_start, block_end - 1,
					   &cached_state);
	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(inode, NULL, cur_offset, block_end - cur_offset);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		hole_size = last_byte - cur_offset;

		if (!(em->flags & EXTENT_FLAG_PREALLOC)) {
			struct extent_map *hole_em;

			ret = maybe_insert_hole(inode, cur_offset, hole_size);
			if (ret)
				break;

			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;

			hole_em = alloc_extent_map();
			if (!hole_em) {
				btrfs_drop_extent_map_range(inode, cur_offset,
						    cur_offset + hole_size - 1,
						    false);
				btrfs_set_inode_full_sync(inode);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;

			hole_em->disk_bytenr = EXTENT_MAP_HOLE;
			hole_em->disk_num_bytes = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->generation = btrfs_get_fs_generation(fs_info);

			ret = btrfs_replace_extent_map_range(inode, hole_em, true);
			free_extent_map(hole_em);
		} else {
			ret = btrfs_inode_set_file_extent_range(inode,
							cur_offset, hole_size);
			if (ret)
				break;
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent(io_tree, hole_start, block_end - 1, &cached_state);
	return ret;
}
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME))) {
			inode_set_mtime_to_ts(inode,
					      inode_set_ctime_current(inode));
		}
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_drew_write_lock(&root->snapshot_lock);
		ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, newsize);
		if (ret) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_drew_write_unlock(&root->snapshot_lock);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, BTRFS_I(inode));
		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_end_transaction(trans);
	} else {
		struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);

		if (btrfs_is_zoned(fs_info)) {
			ret = btrfs_wait_ordered_range(BTRFS_I(inode),
					ALIGN(newsize, fs_info->sectorsize),
					(u64)-1);
			if (ret)
				return ret;
		}

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure any new writes to the file get on disk
		 * on close.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_FLUSH_ON_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		truncate_setsize(inode, newsize);

		inode_dio_wait(inode);

		ret = btrfs_truncate(BTRFS_I(inode), newsize == oldsize);
		if (ret && inode->i_nlink) {
			int err;

			/*
			 * Truncate failed, so fix up the in-memory size. We
			 * adjusted disk_i_size down as we removed extents, so
			 * wait for disk_i_size to be stable and then update the
			 * in-memory size to match.
			 */
			err = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
			if (err)
				return err;
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
		}
	}

	return ret;
}
static int btrfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
			 struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(idmap, dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(idmap, inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(BTRFS_I(inode));

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(idmap, dentry, inode->i_mode);
	}

	return err;
}
/*
 * While truncating the inode pages during eviction, we get the VFS
 * calling btrfs_invalidate_folio() against each folio of the inode. This
 * is slow because the calls to btrfs_invalidate_folio() result in a
 * huge amount of calls to lock_extent() and clear_extent_bit(),
 * which keep merging and splitting extent_state structures over and over,
 * wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidate_folio()
 * skip all those expensive operations on a per folio basis and do only
 * the ordered io finishing, while we release here the extent_map and
 * extent_state structures, without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readahead that have
	 * their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to an
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;
		unsigned state_flags;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		state_flags = state->state;
		spin_unlock(&io_tree->lock);

		lock_extent(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidate_folio, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state_flags & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(BTRFS_I(inode), NULL, start,
					       end - start + 1, NULL);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_CLEAR_ALL_BITS | EXTENT_DO_ACCOUNTING,
				 &cached_state);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}
static struct btrfs_trans_handle *evict_refill_and_join(struct btrfs_root *root,
							struct btrfs_block_rsv *rsv)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 delayed_refs_extra = btrfs_calc_delayed_ref_bytes(fs_info, 1);
	int ret;

	/*
	 * Eviction should be taking place at some place safe because of our
	 * delayed iputs.  However the normal flushing code will run delayed
	 * iputs, so we cannot use FLUSH_ALL otherwise we'll deadlock.
	 *
	 * We reserve the delayed_refs_extra here again because we can't use
	 * btrfs_start_transaction(root, 0) for the same deadlocky reason as
	 * above.  We reserve our extra bit here because we generate a ton of
	 * delayed refs activity by truncating.
	 *
	 * BTRFS_RESERVE_FLUSH_EVICT will steal from the global_rsv if it can,
	 * if we fail to make this reservation we can re-try without the
	 * delayed_refs_extra so we can make some forward progress.
	 */
	ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size + delayed_refs_extra,
				     BTRFS_RESERVE_FLUSH_EVICT);
	if (ret) {
		ret = btrfs_block_rsv_refill(fs_info, rsv, rsv->size,
					     BTRFS_RESERVE_FLUSH_EVICT);
		if (ret) {
			btrfs_warn(fs_info,
		"could not allocate space for delete; will truncate on mount");
			return ERR_PTR(-ENOSPC);
		}
		delayed_refs_extra = 0;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return trans;

	if (delayed_refs_extra) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		trans->bytes_reserved = delayed_refs_extra;
		btrfs_block_rsv_migrate(rsv, trans->block_rsv,
					delayed_refs_extra, true);
	}
	return trans;
}
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv = NULL;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		fsverity_cleanup_inode(inode);
		clear_inode(inode);
		return;
	}

	fs_info = inode_to_fs_info(inode);
	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto out;

	if (is_bad_inode(inode))
		goto out;

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags))
		goto out;

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       btrfs_root_id(root) != BTRFS_ROOT_TREE_OBJECTID);
		goto out;
	}

	/*
	 * This makes sure the inode item in tree is uptodate and the space for
	 * the inode update is released.
	 */
	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret)
		goto out;

	/*
	 * This drops any pending insert or delete operations we have for this
	 * inode.  We could have a delayed dir index deletion queued up, but
	 * we're removing the inode completely so that'll be taken care of in
	 * the truncate.
	 */
	btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		goto out;
	rsv->size = btrfs_calc_metadata_size(fs_info, 1);
	rsv->failfast = true;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	while (1) {
		struct btrfs_truncate_control control = {
			.inode = BTRFS_I(inode),
			.ino = btrfs_ino(BTRFS_I(inode)),
			.new_size = 0,
			.min_type = 0,
		};

		trans = evict_refill_and_join(root, rsv);
		if (IS_ERR(trans))
			goto out;

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, &control);
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		/*
		 * We have not added new delayed items for our inode after we
		 * have flushed its delayed items, so no need to throttle on
		 * delayed items. However we have modified extent buffers.
		 */
		btrfs_btree_balance_dirty_nodelay(fs_info);
		if (ret && ret != -ENOSPC && ret != -EAGAIN)
			goto out;
		else if (!ret)
			break;
	}

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items in
	 * the tree. They will be cleaned up on the next mount. If the inode
	 * number gets reused, cleanup deletes the orphan item without doing
	 * anything, and unlink reuses the existing orphan item.
	 *
	 * If it turns out that we are dropping too many of these, we might want
	 * to add a mechanism for retrying these after a commit.
	 */
	trans = evict_refill_and_join(root, rsv);
	if (!IS_ERR(trans)) {
		trans->block_rsv = rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
	}

out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * If we didn't successfully delete, the orphan item will still be in
	 * the tree and we'll retry on the next mount. Again, we might also want
	 * to retry these periodically in the future.
	 */
	btrfs_remove_delayed_node(BTRFS_I(inode));
	fsverity_cleanup_inode(inode);
	clear_inode(inode);
}
/*
 * Return the key found in the dir entry in the location pointer, fill @type
 * with BTRFS_FT_*, and return 0.
 *
 * If no dir entries were found, returns -ENOENT.
 * If found a corrupted location in dir entry, returns -EUCLEAN.
 */
static int btrfs_inode_by_name(struct btrfs_inode *dir, struct dentry *dentry,
			       struct btrfs_key *location, u8 *type)
{
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = dir->root;
	int ret = 0;
	struct fscrypt_name fname;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 1, &fname);
	if (ret < 0)
		goto out;
	/*
	 * fscrypt_setup_filename() should never return a positive value, but
	 * gcc on sparc/parisc thinks it can, so assert that doesn't happen.
	 */
	ASSERT(ret == 0);

	/* This needs to handle no-key deletions later on */

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir),
				   &fname.disk_name, 0);
	if (IS_ERR_OR_NULL(di)) {
		ret = di ? PTR_ERR(di) : -ENOENT;
		goto out;
	}

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
	if (location->type != BTRFS_INODE_ITEM_KEY &&
	    location->type != BTRFS_ROOT_ITEM_KEY) {
		ret = -EUCLEAN;
		btrfs_warn(root->fs_info,
"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
			   __func__, fname.disk_name.name, btrfs_ino(dir),
			   location->objectid, location->type, location->offset);
	}
	if (!ret)
		*type = btrfs_dir_ftype(path->nodes[0], di);
out:
	fscrypt_free_filename(&fname);
	btrfs_free_path(path);
	return ret;
}
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct btrfs_inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;
	struct fscrypt_name fname;

	ret = fscrypt_setup_filename(&dir->vfs_inode, &dentry->d_name, 0, &fname);
	if (ret)
		return ret;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = btrfs_root_id(dir->root);
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) ||
	    btrfs_root_ref_name_len(leaf, ref) != fname.disk_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, fname.disk_name.name,
				   (unsigned long)(ref + 1), fname.disk_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_get_fs_root(fs_info, location->objectid, true);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	fscrypt_free_filename(&fname);
	return err;
}
static void btrfs_del_inode_from_root(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_inode *entry;
	bool empty = false;

	xa_lock(&root->inodes);
	entry = __xa_erase(&root->inodes, btrfs_ino(inode));
	if (entry == inode)
		empty = xa_empty(&root->inodes);
	xa_unlock(&root->inodes);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		xa_lock(&root->inodes);
		empty = xa_empty(&root->inodes);
		xa_unlock(&root->inodes);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
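
/*
 * The second xa_empty() check above re-samples emptiness under the xarray
 * lock: between dropping the lock and checking the root's refs another task
 * may have added an inode, in which case the root must not be queued as dead.
 */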
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;

	btrfs_set_inode_number(BTRFS_I(inode), args->ino);
	BTRFS_I(inode)->root = btrfs_grab_root(args->root);

	if (args->root && args->root == args->root->fs_info->tree_root &&
	    args->ino != BTRFS_BTREE_INODE_OBJECTID)
		set_bit(BTRFS_INODE_FREE_SPACE_INODE,
			&BTRFS_I(inode)->runtime_flags);
	return 0;
}
static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;

	return args->ino == btrfs_ino(BTRFS_I(inode)) &&
		args->root == BTRFS_I(inode)->root;
}
static struct inode *btrfs_iget_locked(u64 ino, struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(ino, root);

	args.ino = ino;
	args.root = root;

	inode = iget5_locked_rcu(root->fs_info->sb, hashval, btrfs_find_actor,
				 btrfs_init_locked_inode,
				 (void *)&args);
	return inode;
}
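
/*
 * Inodes are hashed and compared by (ino, root) rather than by inode number
 * alone, since different subvolumes reuse the same inode numbers within one
 * superblock.
 */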
/*
 * Get an inode object given its inode number and corresponding root.  Path is
 * preallocated to prevent recursing back to iget through allocator.
 */
struct inode *btrfs_iget_path(u64 ino, struct btrfs_root *root,
			      struct btrfs_path *path)
{
	struct inode *inode;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	ret = btrfs_read_locked_inode(inode, path);
	if (ret)
		return ERR_PTR(ret);

	unlock_new_inode(inode);
	return inode;
}
/*
 * Get an inode object given its inode number and corresponding root.
 */
struct inode *btrfs_iget(u64 ino, struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_path *path;
	int ret;

	inode = btrfs_iget_locked(ino, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW))
		return inode;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	ret = btrfs_read_locked_inode(inode, path);
	btrfs_free_path(path);
	if (ret)
		return ERR_PTR(ret);

	unlock_new_inode(inode);
	return inode;
}
static struct inode *new_simple_dir(struct inode *dir,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct timespec64 ts;
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = btrfs_grab_root(root);
	BTRFS_I(inode)->ref_root_id = key->objectid;
	set_bit(BTRFS_INODE_ROOT_STUB, &BTRFS_I(inode)->runtime_flags);
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	btrfs_set_inode_number(BTRFS_I(inode), BTRFS_EMPTY_SUBVOL_DIR_OBJECTID);
	/*
	 * We only need lookup, the rest is read-only and there's no inode
	 * associated with the dentry
	 */
	inode->i_op = &simple_dir_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;

	ts = inode_set_ctime_current(inode);
	inode_set_mtime_to_ts(inode, ts);
	inode_set_atime_to_ts(inode, inode_get_atime(dir));
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	inode->i_uid = dir->i_uid;
	inode->i_gid = dir->i_gid;

	return inode;
}
static_assert(BTRFS_FT_UNKNOWN == FT_UNKNOWN);
static_assert(BTRFS_FT_REG_FILE == FT_REG_FILE);
static_assert(BTRFS_FT_DIR == FT_DIR);
static_assert(BTRFS_FT_CHRDEV == FT_CHRDEV);
static_assert(BTRFS_FT_BLKDEV == FT_BLKDEV);
static_assert(BTRFS_FT_FIFO == FT_FIFO);
static_assert(BTRFS_FT_SOCK == FT_SOCK);
static_assert(BTRFS_FT_SYMLINK == FT_SYMLINK);

static inline u8 btrfs_inode_type(struct inode *inode)
{
	return fs_umode_to_ftype(inode->i_mode);
}
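
/*
 * The static asserts above guarantee the on-disk BTRFS_FT_* values stay in
 * sync with the generic FT_* values, which is what makes the direct
 * fs_umode_to_ftype() conversion in btrfs_inode_type() valid.
 */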
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location = { 0 };
	u8 di_type = 0;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(BTRFS_I(dir), dentry, &location, &di_type);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(location.objectid, root);
		if (IS_ERR(inode))
			return inode;

		/* Do extra check against inode mode with di_type */
		if (btrfs_inode_type(inode) != di_type) {
			btrfs_crit(fs_info,
"inode mode mismatch with dir: inode mode=0%o btrfs type=%u dir type=%u",
				   inode->i_mode, btrfs_inode_type(inode),
				   di_type);
			iput(inode);
			return ERR_PTR(-EUCLEAN);
		}
		return inode;
	}

	ret = fixup_tree_root_location(fs_info, BTRFS_I(dir), dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir, &location, root);
	} else {
		inode = btrfs_iget(location.objectid, sub_root);
		btrfs_put_root(sub_root);

		if (IS_ERR(inode))
			return inode;

		down_read(&fs_info->cleanup_work_sem);
		if (!sb_rdonly(inode->i_sb))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}
static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode = btrfs_lookup_dentry(dir, dentry);

	if (inode == ERR_PTR(-ENOENT))
		inode = NULL;
	return d_splice_alias(inode, dentry);
}
/*
 * Find the highest existing sequence number in a directory and then set the
 * in-memory index_cnt variable to the first free sequence number.
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	if (path->slots[0] == 0) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = BTRFS_DIR_START_INDEX;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_get_dir_last_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	btrfs_inode_lock(dir, 0);
	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				goto out;
		}
	}

	/* index_cnt is the index number of next new entry, so decrement it. */
	*index = dir->index_cnt - 1;
out:
	btrfs_inode_unlock(dir, 0);

	return ret;
}
/*
 * All this infrastructure exists because dir_emit can fault, and we are holding
 * the tree lock when doing readdir.  For now just allocate a buffer and copy
 * our information into that, and then dir_emit from the buffer.  This is
 * similar to what NFS does, only we don't keep the buffer around in pagecache
 * because I'm afraid I'll mess that up.  Long term we need to make filldir do
 * copy_to_user_inatomic so we don't have to worry about page faulting under the
 * tree lock.
 */
static int btrfs_opendir(struct inode *inode, struct file *file)
{
	struct btrfs_file_private *private;
	u64 last_index;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(inode), &last_index);
	if (ret)
		return ret;

	private = kzalloc(sizeof(struct btrfs_file_private), GFP_KERNEL);
	if (!private)
		return -ENOMEM;
	private->last_index = last_index;
	private->filldir_buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!private->filldir_buf) {
		kfree(private);
		return -ENOMEM;
	}
	file->private_data = private;
	return 0;
}
static loff_t btrfs_dir_llseek(struct file *file, loff_t offset, int whence)
{
	struct btrfs_file_private *private = file->private_data;
	int ret;

	ret = btrfs_get_dir_last_index(BTRFS_I(file_inode(file)),
				       &private->last_index);
	if (ret)
		return ret;

	return generic_file_llseek(file, offset, whence);
}
struct dir_entry {
	u64 ino;
	u64 offset;
	unsigned type;
	int name_len;
};

static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx)
{
	while (entries--) {
		struct dir_entry *entry = addr;
		char *name = (char *)(entry + 1);

		ctx->pos = get_unaligned(&entry->offset);
		if (!dir_emit(ctx, name, get_unaligned(&entry->name_len),
					 get_unaligned(&entry->ino),
					 get_unaligned(&entry->type)))
			return 1;
		addr += sizeof(struct dir_entry) +
			get_unaligned(&entry->name_len);
		ctx->pos++;
	}
	return 0;
}
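
/*
 * The filldir buffer is a packed sequence of variable-sized records, each a
 * struct dir_entry immediately followed by name_len bytes of name.  Records
 * are not aligned, hence the get_unaligned()/put_unaligned() accessors.
 */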
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_private *private = file->private_data;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	void *addr;
	LIST_HEAD(ins_list);
	LIST_HEAD(del_list);
	int ret;
	char *name_ptr;
	int name_len;
	int entries = 0;
	int total_len = 0;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	addr = private->filldir_buf;
	path->reada = READA_FORWARD;

	put = btrfs_readdir_get_delayed_items(BTRFS_I(inode), private->last_index,
					      &ins_list, &del_list);

again:
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	btrfs_for_each_slot(root, &key, &found_key, path, ret) {
		struct dir_entry *entry;
		struct extent_buffer *leaf = path->nodes[0];
		u8 ftype;

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			continue;
		if (found_key.offset > private->last_index)
			break;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			continue;
		di = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dir_item);
		name_len = btrfs_dir_name_len(leaf, di);
		if ((total_len + sizeof(struct dir_entry) + name_len) >=
		    PAGE_SIZE) {
			btrfs_release_path(path);
			ret = btrfs_filldir(private->filldir_buf, entries, ctx);
			if (ret)
				goto nopos;
			addr = private->filldir_buf;
			entries = 0;
			total_len = 0;
			goto again;
		}

		ftype = btrfs_dir_flags_to_ftype(btrfs_dir_flags(leaf, di));
		entry = addr;
		name_ptr = (char *)(entry + 1);
		read_extent_buffer(leaf, name_ptr,
				   (unsigned long)(di + 1), name_len);
		put_unaligned(name_len, &entry->name_len);
		put_unaligned(fs_ftype_to_dtype(ftype), &entry->type);
		btrfs_dir_item_key_to_cpu(leaf, di, &location);
		put_unaligned(location.objectid, &entry->ino);
		put_unaligned(found_key.offset, &entry->offset);
		entries++;
		addr += sizeof(struct dir_entry) + name_len;
		total_len += sizeof(struct dir_entry) + name_len;
	}
	/* Catch error encountered during iteration */
	if (ret < 0)
		goto err;

	btrfs_release_path(path);

	ret = btrfs_filldir(private->filldir_buf, entries, ctx);
	if (ret)
		goto nopos;

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we reuse freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(BTRFS_I(inode), &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &inode->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, inode);
	if (ret == -ENOSPC || ret == -EDQUOT) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, inode);
	}
	btrfs_end_transaction(trans);
	if (inode->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}
/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	bool dirty;

	if (btrfs_root_readonly(root))
		return -EROFS;

	dirty = inode_update_timestamps(inode, flags);
	return dirty ? btrfs_dirty_inode(BTRFS_I(inode)) : 0;
}
/*
 * Helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree.
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
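/*
 * Illustrative example (editor's addition): for a freshly created directory,
 * index_cnt starts at BTRFS_DIR_START_INDEX (2, since indexes 0 and 1 are
 * reserved for "." and ".."), so successive calls hand out a strictly
 * increasing sequence:
 *
 *	u64 idx;
 *
 *	btrfs_set_inode_index(dir, &idx);	(idx == 2)
 *	btrfs_set_inode_index(dir, &idx);	(idx == 3)
 *
 * The (u64)-1 check above lazily recomputes index_cnt, from the delayed items
 * or the btree, the first time the directory is touched after being loaded.
 */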
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;

	args.ino = btrfs_ino(BTRFS_I(inode));
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
int btrfs_new_inode_prepare(struct btrfs_new_inode_args *args,
			    unsigned int *trans_num_items)
{
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	int ret;

	if (!args->orphan) {
		ret = fscrypt_setup_filename(dir, &args->dentry->d_name, 0,
					     &args->fname);
		if (ret)
			return ret;
	}

	ret = posix_acl_create(dir, &inode->i_mode, &args->default_acl, &args->acl);
	if (ret) {
		fscrypt_free_filename(&args->fname);
		return ret;
	}

	/* 1 to add inode item */
	*trans_num_items = 1;
	/* 1 to add compression property */
	if (BTRFS_I(dir)->prop_compress)
		(*trans_num_items)++;
	/* 1 to add default ACL xattr */
	if (args->default_acl)
		(*trans_num_items)++;
	/* 1 to add access ACL xattr */
	if (args->acl)
		(*trans_num_items)++;
#ifdef CONFIG_SECURITY
	/* 1 to add LSM xattr */
	if (dir->i_security)
		(*trans_num_items)++;
#endif
	if (args->orphan) {
		/* 1 to add orphan item */
		(*trans_num_items)++;
	} else {
		/*
		 * 1 to add dir item
		 * 1 to add dir index
		 * 1 to update parent inode item
		 *
		 * No need for 1 unit for the inode ref item because it is
		 * inserted in a batch together with the inode item at
		 * btrfs_create_new_inode().
		 */
		*trans_num_items += 3;
	}
	return 0;
}
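/*
 * Worked example (editor's addition): creating a plain file with no ACLs in a
 * directory without a compression property, on a kernel with CONFIG_SECURITY
 * and an LSM xattr, reserves 1 (inode item) + 1 (LSM xattr) + 3 (dir item,
 * dir index, parent inode update) = 5 transaction units.
 */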
void btrfs_new_inode_args_destroy(struct btrfs_new_inode_args *args)
{
	posix_acl_release(args->acl);
	posix_acl_release(args->default_acl);
	fscrypt_free_filename(&args->fname);
}
/*
 * Inherit flags from the parent inode.
 *
 * Currently only the compression flags and the cow flags are inherited.
 */
static void btrfs_inherit_iflags(struct btrfs_inode *inode, struct btrfs_inode *dir)
{
	unsigned int flags = dir->flags;

	if (flags & BTRFS_INODE_NOCOMPRESS) {
		inode->flags &= ~BTRFS_INODE_COMPRESS;
		inode->flags |= BTRFS_INODE_NOCOMPRESS;
	} else if (flags & BTRFS_INODE_COMPRESS) {
		inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
		inode->flags |= BTRFS_INODE_COMPRESS;
	}

	if (flags & BTRFS_INODE_NODATACOW) {
		inode->flags |= BTRFS_INODE_NODATACOW;
		if (S_ISREG(inode->vfs_inode.i_mode))
			inode->flags |= BTRFS_INODE_NODATASUM;
	}

	btrfs_sync_inode_flags_to_i_flags(&inode->vfs_inode);
}
int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
			   struct btrfs_new_inode_args *args)
{
	struct timespec64 ts;
	struct inode *dir = args->dir;
	struct inode *inode = args->inode;
	const struct fscrypt_str *name = args->orphan ? NULL : &args->fname.disk_name;
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_root *root;
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	u64 objectid;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	struct btrfs_item_batch batch;
	unsigned long ptr;
	int ret;
	bool xa_reserved = false;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!args->subvol)
		BTRFS_I(inode)->root = btrfs_grab_root(BTRFS_I(dir)->root);
	root = BTRFS_I(inode)->root;

	ret = btrfs_init_file_extent_tree(BTRFS_I(inode));
	if (ret)
		goto out;

	ret = btrfs_get_free_objectid(root, &objectid);
	if (ret)
		goto out;
	btrfs_set_inode_number(BTRFS_I(inode), objectid);

	ret = xa_reserve(&root->inodes, objectid, GFP_NOFS);
	if (ret)
		goto out;
	xa_reserved = true;

	if (args->orphan) {
		/*
		 * O_TMPFILE, set link count to 0, so that after this point, we
		 * fill in an inode item with the correct link count.
		 */
		set_nlink(inode, 0);
	} else {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), &BTRFS_I(inode)->dir_index);
		if (ret)
			goto out;
	}

	if (S_ISDIR(inode->i_mode))
		BTRFS_I(inode)->index_cnt = BTRFS_DIR_START_INDEX;

	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We don't have any capability xattrs set here yet, shortcut any
	 * queries for the xattrs here.  If we add them later via the inode
	 * security init path or any other path this flag will be cleared.
	 */
	set_bit(BTRFS_INODE_NO_CAP_XATTR, &BTRFS_I(inode)->runtime_flags);

	/*
	 * Subvolumes don't inherit flags from their parent directory.
	 * Originally this was probably by accident, but we probably can't
	 * change it now without compatibility issues.
	 */
	if (!args->subvol)
		btrfs_inherit_iflags(BTRFS_I(inode), BTRFS_I(dir));

	if (S_ISREG(inode->i_mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0) {
		if (!args->orphan)
			BTRFS_I(dir)->index_cnt--;
		goto out;
	}

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	btrfs_set_inode_full_sync(BTRFS_I(inode));

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (!args->orphan) {
		/*
		 * Start new inodes with an inode_ref.  This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item.  Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		if (args->subvol) {
			key[1].offset = objectid;
			sizes[1] = 2 + sizeof(*ref);
		} else {
			key[1].offset = btrfs_ino(BTRFS_I(dir));
			sizes[1] = name->len + sizeof(*ref);
		}
	}

	batch.keys = &key[0];
	batch.data_sizes = &sizes[0];
	batch.total_data_size = sizes[0] + (args->orphan ? 0 : sizes[1]);
	batch.nr = args->orphan ? 1 : 2;
	ret = btrfs_insert_empty_items(trans, root, path, &batch);
	if (ret != 0) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	ts = simple_inode_init_ts(inode);
	BTRFS_I(inode)->i_otime_sec = ts.tv_sec;
	BTRFS_I(inode)->i_otime_nsec = ts.tv_nsec;

	/*
	 * We're going to fill the inode item now, so at this point the inode
	 * must be fully initialized.
	 */

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				    struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			      sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (!args->orphan) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		ptr = (unsigned long)(ref + 1);
		if (args->subvol) {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref, 2);
			btrfs_set_inode_ref_index(path->nodes[0], ref, 0);
			write_extent_buffer(path->nodes[0], "..", ptr, 2);
		} else {
			btrfs_set_inode_ref_name_len(path->nodes[0], ref,
						     name->len);
			btrfs_set_inode_ref_index(path->nodes[0], ref,
						  BTRFS_I(inode)->dir_index);
			write_extent_buffer(path->nodes[0], name->name, ptr,
					    name->len);
		}
	}

	btrfs_mark_buffer_dirty(trans, path->nodes[0]);
	/*
	 * We don't need the path anymore, plus inheriting properties, adding
	 * ACLs, security xattrs, orphan item or adding the link, will result in
	 * allocating yet another path.  So just free our path.
	 */
	btrfs_free_path(path);
	path = NULL;

	if (args->subvol) {
		struct inode *parent;

		/*
		 * Subvolumes inherit properties from their parent subvolume,
		 * not the directory they were created in.
		 */
		parent = btrfs_iget(BTRFS_FIRST_FREE_OBJECTID, BTRFS_I(dir)->root);
		if (IS_ERR(parent)) {
			ret = PTR_ERR(parent);
		} else {
			ret = btrfs_inode_inherit_props(trans, inode, parent);
			iput(parent);
		}
	} else {
		ret = btrfs_inode_inherit_props(trans, inode, dir);
	}
	if (ret) {
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), btrfs_root_id(root), ret);
	}

	/*
	 * Subvolumes don't inherit ACLs or get passed to the LSM.  This is
	 * probably a bug.
	 */
	if (!args->subvol) {
		ret = btrfs_init_inode_security(trans, args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto discard;
		}
	}

	ret = btrfs_add_inode_to_root(BTRFS_I(inode), false);
	if (WARN_ON(ret)) {
		/* Shouldn't happen, we used xa_reserve() before. */
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, BTRFS_I(inode));

	btrfs_update_root_times(trans, root);

	if (args->orphan) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	} else {
		ret = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode), name,
				     0, BTRFS_I(inode)->dir_index);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto discard;
	}

	return 0;

discard:
	/*
	 * discard_new_inode() calls iput(), but the caller owns the reference
	 * to the inode.
	 */
	ihold(inode);
	discard_new_inode(inode);
out:
	if (xa_reserved)
		xa_release(&root->inodes, objectid);

	btrfs_free_path(path);
	return ret;
}
/*
 * Utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * If 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const struct fscrypt_str *name, int add_backref, u64 index)
{
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, key.objectid,
					 btrfs_root_id(root), parent_ino,
					 index, name);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name,
					     ino, parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, name, parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name->len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	/*
	 * If we are replaying a log tree, we do not want to update the mtime
	 * and ctime of the parent directory with the current time, since the
	 * log replay procedure is responsible for setting them to their correct
	 * values (the ones it had when the fsync was done).
	 */
	if (!test_bit(BTRFS_FS_LOG_RECOVERING, &root->fs_info->flags))
		inode_set_mtime_to_ts(&parent_inode->vfs_inode,
				      inode_set_ctime_current(&parent_inode->vfs_inode));

	ret = btrfs_update_inode(trans, parent_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;

		err = btrfs_del_root_ref(trans, key.objectid,
					 btrfs_root_id(root), parent_ino,
					 &local_index, name);
		if (err)
			btrfs_abort_transaction(trans, err);
	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, ino, parent_ino,
					  &local_index);
		if (err)
			btrfs_abort_transaction(trans, err);
	}

	/* Return the original error code */
	return ret;
}
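/*
 * Editor's note: the i_size bump by name->len * 2 above reflects that each
 * directory entry stores its name twice, once in the dir item (keyed by name
 * hash) and once in the dir index item (keyed by sequence number).
 */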
static int btrfs_create_common(struct inode *dir, struct dentry *dentry,
			       struct inode *inode)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
		.inode = inode,
	};
	unsigned int trans_num_items;
	struct btrfs_trans_handle *trans;
	int err;

	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (!err)
		d_instantiate_new(dentry, inode);

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
static int btrfs_mknod(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode, dev_t rdev)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);
	return btrfs_create_common(dir, dentry, inode);
}

static int btrfs_create(struct mnt_idmap *idmap, struct inode *dir,
			struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	return btrfs_create_common(dir, dentry, inode);
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct fscrypt_name fname;
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (btrfs_root_id(root) != btrfs_root_id(BTRFS_I(inode)->root))
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &fname);
	if (err)
		goto fail;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 * 1 item for orphan item deletion if O_TMPFILE
	 */
	trans = btrfs_start_transaction(root, inode->i_nlink ? 5 : 6);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode_set_ctime_current(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			     &fname.disk_name, 1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;

		err = btrfs_update_inode(trans, BTRFS_I(inode));
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, old_dentry, NULL, 0, parent);
	}

fail:
	fscrypt_free_filename(&fname);
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct mnt_idmap *idmap, struct inode *dir,
		       struct dentry *dentry, umode_t mode)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFDIR | mode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;
	return btrfs_create_common(dir, dentry, inode);
}
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct folio *folio,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf, path->slots[0]);
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, folio, 0, inline_size,
			       max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */
	if (max_size < PAGE_SIZE)
		folio_zero_range(folio, max_size, PAGE_SIZE - max_size);
	kfree(tmp);
	return ret;
}
static int read_inline_extent(struct btrfs_path *path, struct folio *folio)
{
	struct btrfs_file_extent_item *fi;
	void *kaddr;
	size_t copy_size;

	if (!folio || folio_test_uptodate(folio))
		return 0;

	ASSERT(folio_pos(folio) == 0);

	fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
			    struct btrfs_file_extent_item);
	if (btrfs_file_extent_compression(path->nodes[0], fi) != BTRFS_COMPRESS_NONE)
		return uncompress_inline(path, folio, fi);

	copy_size = min_t(u64, PAGE_SIZE,
			  btrfs_file_extent_ram_bytes(path->nodes[0], fi));
	kaddr = kmap_local_folio(folio, 0);
	read_extent_buffer(path->nodes[0], kaddr,
			   btrfs_file_extent_inline_start(fi), copy_size);
	kunmap_local(kaddr);
	if (copy_size < PAGE_SIZE)
		folio_zero_range(folio, copy_size, PAGE_SIZE - copy_size);
	return 0;
}
/*
 * Lookup the first extent overlapping a range in a file.
 *
 * @inode:	file to search in
 * @folio:	folio to read extent data into if the extent is inline
 * @start:	file offset
 * @len:	length of range starting at @start
 *
 * Return the first &struct extent_map which overlaps the given range, reading
 * it from the B-tree and caching it if necessary.  Note that there may be more
 * extents which overlap the given range after the returned extent_map.
 *
 * If @folio is not NULL and the extent is inline, this also reads the extent
 * data directly into the folio and marks the extent up to date in the io_tree.
 *
 * Return: ERR_PTR on error, non-NULL extent_map on success.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct folio *folio, u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	int ret = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	int extent_type = -1;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;

	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->disk_bytenr == EXTENT_MAP_INLINE && folio)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		ret = -ENOMEM;
		goto out;
	}
	em->start = EXTENT_MAP_HOLE;
	em->disk_bytenr = EXTENT_MAP_HOLE;
	em->len = (u64)-1;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	/* Chances are we'll be called again, so go ahead and do readahead */
	path->reada = READA_FORWARD;

	/*
	 * The same explanation in load_free_space_cache applies here as well,
	 * we only read when we're loading the free space cache, and at that
	 * point the commit_root has everything we need.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	ret = btrfs_lookup_file_extent(NULL, root, path, objectid, start, 0);
	if (ret < 0) {
		goto out;
	} else if (ret > 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
		ret = 0;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	if (found_key.objectid != objectid ||
	    found_key.type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	extent_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	extent_end = btrfs_file_extent_end(path);
	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		/* Only regular file could have regular/prealloc extent */
		if (!S_ISREG(inode->vfs_inode.i_mode)) {
			ret = -EUCLEAN;
			btrfs_crit(fs_info,
		"regular/prealloc extent found for non-regular inode %llu",
				   btrfs_ino(inode));
			goto out;
		}
		trace_btrfs_get_extent_show_fi_regular(inode, leaf, item,
						       extent_start);
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		trace_btrfs_get_extent_show_fi_inline(inode, leaf, item,
						      path->slots[0],
						      extent_start);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			else if (ret > 0)
				goto not_found;

			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;

		/* New extent overlaps with existing one */
		em->start = start;
		em->len = found_key.offset - start;
		em->disk_bytenr = EXTENT_MAP_HOLE;
		goto insert;
	}

	btrfs_extent_item_to_extent_map(inode, path, item, em);

	if (extent_type == BTRFS_FILE_EXTENT_REG ||
	    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
		/*
		 * Inline extent can only exist at file offset 0.  This is
		 * ensured by tree-checker and inline extent creation path.
		 * Thus all members representing file offsets should be zero.
		 */
		ASSERT(extent_start == 0);
		ASSERT(em->start == 0);

		/*
		 * btrfs_extent_item_to_extent_map() should have properly
		 * initialized em members already.
		 *
		 * Other members are not utilized for inline extents.
		 */
		ASSERT(em->disk_bytenr == EXTENT_MAP_INLINE);
		ASSERT(em->len == fs_info->sectorsize);

		ret = read_inline_extent(path, folio);
		if (ret < 0)
			goto out;
		goto insert;
	}
not_found:
	em->start = start;
	em->len = len;
	em->disk_bytenr = EXTENT_MAP_HOLE;
insert:
	ret = 0;
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		ret = -EIO;
		goto out;
	}

	write_lock(&em_tree->lock);
	ret = btrfs_add_extent_mapping(inode, &em, start, len);
	write_unlock(&em_tree->lock);
out:
	btrfs_free_path(path);

	trace_btrfs_get_extent(root, inode, em);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}
	return em;
}
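/*
 * Usage sketch (editor's addition, illustrative only): callers typically walk
 * a range by repeatedly asking for the first overlapping extent map and
 * advancing past it:
 *
 *	while (cur < end) {
 *		struct extent_map *em = btrfs_get_extent(inode, NULL, cur,
 *							 end - cur);
 *		if (IS_ERR(em))
 *			return PTR_ERR(em);
 *		(inspect em here)
 *		cur = extent_map_end(em);
 *		free_extent_map(em);
 *	}
 *
 * Because the returned extent may merely overlap the start of the range,
 * advancing by extent_map_end(em) rather than by the requested length is the
 * safe pattern.
 */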
static bool btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr)
{
	struct btrfs_block_group *block_group;
	bool readonly = false;

	block_group = btrfs_lookup_block_group(fs_info, bytenr);
	if (!block_group || block_group->ro)
		readonly = true;
	if (block_group)
		btrfs_put_block_group(block_group);
	return readonly;
}
/*
 * Check if we can do nocow write into the range [@offset, @offset + @len)
 *
 * @offset:	File offset
 * @len:	The length to write, will be updated to the nocow writeable
 *		range
 * @file_extent: (optional) Return the details of the matching file extent
 * @nowait:	Whether this function can sleep
 * @strict:	if true, omit optimizations that might force us into unnecessary
 *		cow. e.g., don't trust generation number.
 *
 * Return:
 * >0	and update @len if we can do nocow write
 *  0	if we can't do nocow write
 * <0	if error happened
 *
 * NOTE: This only checks the file extents, caller is responsible to wait for
 *	 any ordered extents.
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      struct btrfs_file_extent *file_extent,
			      bool nowait, bool strict)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct can_nocow_file_extent_args nocow_args = { 0 };
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	int found_type;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path,
				       btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	if (ret == 1) {
		if (path->slots[0] == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		path->slots[0]--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	if (btrfs_file_extent_end(path) <= offset)
		goto out;

	fi = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);

	nocow_args.start = offset;
	nocow_args.end = offset + *len - 1;
	nocow_args.strict = strict;
	nocow_args.free_path = true;

	ret = can_nocow_file_extent(path, &key, BTRFS_I(inode), &nocow_args);
	/* can_nocow_file_extent() has freed the path. */
	path = NULL;

	if (ret != 1) {
		/* Treat errors as not being able to NOCOW. */
		ret = 0;
		goto out;
	}

	ret = 0;
	if (btrfs_extent_readonly(fs_info,
				  nocow_args.file_extent.disk_bytenr +
				  nocow_args.file_extent.offset))
		goto out;

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + nocow_args.file_extent.num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit_exists(io_tree, offset, range_end, EXTENT_DELALLOC);
		if (ret)
			goto out;
	}

	if (file_extent)
		memcpy(file_extent, &nocow_args.file_extent, sizeof(*file_extent));

	*len = nocow_args.file_extent.num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
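/*
 * Illustrative contract (editor's addition): on a successful NOCOW check the
 * caller's length is clamped to the matching file extent, e.g.:
 *
 *	u64 len = SZ_1M;
 *
 *	ret = can_nocow_extent(inode, offset, &len, &file_extent, false, false);
 *
 * A return of 1 means NOCOW is possible for [offset, offset + len), where len
 * may now be smaller than the 1MiB that was asked for; a return of 0 simply
 * means "fall back to COW", including when internal lookups fail.
 */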
/* The callers of this must take lock_extent() */
struct extent_map *btrfs_create_io_em(struct btrfs_inode *inode, u64 start,
				      const struct btrfs_file_extent *file_extent,
				      int type)
{
	struct extent_map *em;
	int ret;

	/*
	 * Note the missing NOCOW type.
	 *
	 * For pure NOCOW writes, we should not create an io extent map, but
	 * just reuse the existing one.
	 * Only PREALLOC writes (NOCOW write into preallocated range) can
	 * create an io extent map.
	 */
	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_REGULAR);

	switch (type) {
	case BTRFS_ORDERED_PREALLOC:
		/* We're only referring to part of a larger preallocated extent. */
		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
		break;
	case BTRFS_ORDERED_REGULAR:
		/* COW results in a new extent matching our file extent size. */
		ASSERT(file_extent->disk_num_bytes == file_extent->num_bytes);
		ASSERT(file_extent->ram_bytes == file_extent->num_bytes);

		/* Since it's a new extent, we should not have any offset. */
		ASSERT(file_extent->offset == 0);
		break;
	case BTRFS_ORDERED_COMPRESSED:
		/* Must be compressed. */
		ASSERT(file_extent->compression != BTRFS_COMPRESS_NONE);

		/*
		 * Encoded write can make us refer to part of the
		 * uncompressed extent.
		 */
		ASSERT(file_extent->num_bytes <= file_extent->ram_bytes);
		break;
	}

	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->len = file_extent->num_bytes;
	em->disk_bytenr = file_extent->disk_bytenr;
	em->disk_num_bytes = file_extent->disk_num_bytes;
	em->ram_bytes = file_extent->ram_bytes;
	em->generation = -1;
	em->offset = file_extent->offset;
	em->flags |= EXTENT_FLAG_PINNED;
	if (type == BTRFS_ORDERED_COMPRESSED)
		extent_map_set_compression(em, file_extent->compression);

	ret = btrfs_replace_extent_map_range(inode, em, true);
	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers needs to do free_extent_map once. */
	return em;
}
/*
 * For release_folio() and invalidate_folio() we have a race window where
 * folio_end_writeback() is called but the subpage spinlock is not yet released.
 * If we continue to release/invalidate the folio, we could cause use-after-free
 * for the subpage spinlock.  So this function is to spin and wait for the
 * subpage spinlock.
 */
static void wait_subpage_spinlock(struct folio *folio)
{
	struct btrfs_fs_info *fs_info = folio_to_fs_info(folio);
	struct btrfs_subpage *subpage;

	if (!btrfs_is_subpage(fs_info, folio->mapping))
		return;

	ASSERT(folio_test_private(folio) && folio_get_private(folio));
	subpage = folio_get_private(folio);

	/*
	 * This may look insane as we just acquire the spinlock and release it,
	 * without doing anything.  But we just want to make sure no one is
	 * still holding the subpage spinlock.
	 * And since the page is not dirty nor writeback, and we have page
	 * locked, the only possible way to hold a spinlock is from the endio
	 * function to clear page writeback.
	 *
	 * Here we just acquire the spinlock so that all existing callers
	 * should exit and we're safe to release/invalidate the page.
	 */
	spin_lock_irq(&subpage->lock);
	spin_unlock_irq(&subpage->lock);
}
static int btrfs_launder_folio(struct folio *folio)
{
	return btrfs_qgroup_free_data(folio_to_inode(folio), NULL, folio_pos(folio),
				      folio_size(folio), NULL);
}

static bool __btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (try_release_extent_mapping(folio, gfp_flags)) {
		wait_subpage_spinlock(folio);
		clear_folio_extent_mapped(folio);
		return true;
	}
	return false;
}

static bool btrfs_release_folio(struct folio *folio, gfp_t gfp_flags)
{
	if (folio_test_writeback(folio) || folio_test_dirty(folio))
		return false;
	return __btrfs_release_folio(folio, gfp_flags);
}

#ifdef CONFIG_MIGRATION
static int btrfs_migrate_folio(struct address_space *mapping,
			       struct folio *dst, struct folio *src,
			       enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret != MIGRATEPAGE_SUCCESS)
		return ret;

	if (folio_test_ordered(src)) {
		folio_clear_ordered(src);
		folio_set_ordered(dst);
	}

	return MIGRATEPAGE_SUCCESS;
}
#else
#define btrfs_migrate_folio NULL
#endif
static void btrfs_invalidate_folio(struct folio *folio, size_t offset,
				   size_t length)
{
	struct btrfs_inode *inode = folio_to_inode(folio);
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *tree = &inode->io_tree;
	struct extent_state *cached_state = NULL;
	u64 page_start = folio_pos(folio);
	u64 page_end = page_start + folio_size(folio) - 1;
	u64 cur;
	int inode_evicting = inode->vfs_inode.i_state & I_FREEING;

	/*
	 * We have folio locked so no new ordered extent can be created on this
	 * page, nor can a bio be submitted for this folio.
	 *
	 * But already submitted bio can still be finished on this folio.
	 * Furthermore, endio function won't skip a folio which has Ordered
	 * already cleared, so it's possible for endio and
	 * invalidate_folio to do the same ordered extent accounting twice
	 * on one folio.
	 *
	 * So here we wait for any submitted bios to finish, so that we won't
	 * do double ordered extent accounting on the same folio.
	 */
	folio_wait_writeback(folio);
	wait_subpage_spinlock(folio);

	/*
	 * For subpage case, we have call sites like
	 * btrfs_punch_hole_lock_range() which passes range not aligned to
	 * sectorsize.
	 * If the range doesn't cover the full folio, we don't need to and
	 * shouldn't clear page extent mapped, as folio->private can still
	 * record subpage dirty bits for other part of the range.
	 *
	 * For cases that invalidate the full folio even the range doesn't
	 * cover the full folio, like invalidating the last folio, we're
	 * still safe to wait for ordered extent to finish.
	 */
	if (!(offset == 0 && length == folio_size(folio))) {
		btrfs_release_folio(folio, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent(tree, page_start, page_end, &cached_state);

	cur = page_start;
	while (cur < page_end) {
		struct btrfs_ordered_extent *ordered;
		u64 range_end;
		u32 range_len;
		u32 extra_flags = 0;

		ordered = btrfs_lookup_first_ordered_range(inode, cur,
							   page_end + 1 - cur);
		if (!ordered) {
			range_end = page_end;
			/*
			 * No ordered extent covering this range, we are safe
			 * to delete all extent states in the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}
		if (ordered->file_offset > cur) {
			/*
			 * There is a range between [cur, oe->file_offset) not
			 * covered by any ordered extent.
			 * We are safe to delete all extent states, and handle
			 * the ordered extent in the next iteration.
			 */
			range_end = ordered->file_offset - 1;
			extra_flags = EXTENT_CLEAR_ALL_BITS;
			goto next;
		}

		range_end = min(ordered->file_offset + ordered->num_bytes - 1,
				page_end);
		ASSERT(range_end + 1 - cur < U32_MAX);
		range_len = range_end + 1 - cur;
		if (!btrfs_folio_test_ordered(fs_info, folio, cur, range_len)) {
			/*
			 * If Ordered is cleared, it means endio has
			 * already been executed for the range.
			 * We can't delete the extent states as
			 * btrfs_finish_ordered_io() may still use some of them.
			 */
			goto next;
		}
		btrfs_folio_clear_ordered(fs_info, folio, cur, range_len);

		/*
		 * IO on this page will never be started, so we need to account
		 * for any ordered extents now.  Don't clear EXTENT_DELALLOC_NEW
		 * here, must leave that up for the ordered extent completion.
		 *
		 * This will also unlock the range for incoming
		 * btrfs_finish_ordered_io().
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, cur, range_end,
					 EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, &cached_state);

		spin_lock_irq(&inode->ordered_tree_lock);
		set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
		ordered->truncated_len = min(ordered->truncated_len,
					     cur - ordered->file_offset);
		spin_unlock_irq(&inode->ordered_tree_lock);

		/*
		 * If the ordered extent has finished, we're safe to delete all
		 * the extent states of the range, otherwise
		 * btrfs_finish_ordered_io() will get executed by endio for
		 * other pages, so we can't delete extent states.
		 */
		if (btrfs_dec_test_ordered_pending(inode, &ordered,
						   cur, range_end + 1 - cur)) {
			btrfs_finish_ordered_io(ordered);
			/*
			 * The ordered extent has finished, now we're again
			 * safe to delete all extent states of the range.
			 */
			extra_flags = EXTENT_CLEAR_ALL_BITS;
		}
next:
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		/*
		 * Qgroup reserved space handler
		 * Sector(s) here will be either:
		 *
		 * 1) Already written to disk or bio already finished
		 *    Then its QGROUP_RESERVED bit in io_tree is already cleared.
		 *    Qgroup will be handled by its qgroup_record then.
		 *    btrfs_qgroup_free_data() call will do nothing here.
		 *
		 * 2) Not written to disk yet
		 *    Then btrfs_qgroup_free_data() call will clear the
		 *    QGROUP_RESERVED bit of its io_tree, and free the qgroup
		 *    reserved data space.
		 *    Since the IO will never happen for this page.
		 */
		btrfs_qgroup_free_data(inode, NULL, cur, range_end + 1 - cur, NULL);
		if (!inode_evicting) {
			clear_extent_bit(tree, cur, range_end, EXTENT_LOCKED |
					 EXTENT_DELALLOC | EXTENT_UPTODATE |
					 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG |
					 extra_flags, &cached_state);
		}
		cur = range_end + 1;
	}
	/*
	 * We have iterated through all ordered extents of the page, the page
	 * should not have Ordered anymore, or the above iteration
	 * did something wrong.
	 */
	ASSERT(!folio_test_ordered(folio));
	btrfs_folio_clear_checked(fs_info, folio, folio_pos(folio), folio_size(folio));
	if (!inode_evicting)
		__btrfs_release_folio(folio, GFP_NOFS);
	clear_folio_extent_mapped(folio);
}
static int btrfs_truncate(struct btrfs_inode *inode, bool skip_writeback)
{
	struct btrfs_truncate_control control = {
		.inode = inode,
		.ino = btrfs_ino(inode),
		.min_type = BTRFS_EXTENT_DATA_KEY,
		.clear_extent_range = true,
	};
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *rsv;
	int ret;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	const u64 min_size = btrfs_calc_metadata_size(fs_info, 1);

	if (!skip_writeback) {
		ret = btrfs_wait_ordered_range(inode,
					       inode->vfs_inode.i_size & (~mask),
					       (u64)-1);
		if (ret)
			return ret;
	}

	/*
	 * Yes ladies and gentlemen, this is indeed ugly.  We have a couple of
	 * things going on here:
	 *
	 * 1) We need to reserve space to update our inode.
	 *
	 * 2) We need to have something to cache all the space that is going to
	 * be free'd up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to be separate.  The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode.  We also
	 * need to be able to stop the transaction and start a new one, which
	 * means we need to be able to update the inode several times, and we
	 * have no idea of knowing how many times that will be, so we can't just
	 * reserve 1 item for the entirety of the operation, so that has to be
	 * done separately as well.
	 *
	 * So that leaves us with
	 *
	 * 1) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 2) fs_info->trans_block_rsv - this will have 1 items worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = true;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, false);
	/*
	 * We have reserved 2 metadata units when we started the transaction and
	 * min_size matches 1 unit, so this should never fail, but if it does,
	 * it's not critical we just fail truncation.
	 */
	if (WARN_ON(ret)) {
		btrfs_end_transaction(trans);
		goto out;
	}

	trans->block_rsv = rsv;

	while (1) {
		struct extent_state *cached_state = NULL;
		const u64 new_size = inode->vfs_inode.i_size;
		const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);

		control.new_size = new_size;
		lock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);
		/*
		 * We want to drop from the next block forward in case this new
		 * size is not block aligned since we will be keeping the last
		 * block of the extent just the way it is.
		 */
		btrfs_drop_extent_map_range(inode,
					    ALIGN(new_size, fs_info->sectorsize),
					    (u64)-1, false);

		ret = btrfs_truncate_inode_items(trans, root, &control);

		inode_sub_bytes(&inode->vfs_inode, control.sub_bytes);
		btrfs_inode_safe_disk_i_size_write(inode, control.last_size);

		unlock_extent(&inode->io_tree, lock_start, (u64)-1, &cached_state);

		trans->block_rsv = &fs_info->trans_block_rsv;
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		ret = btrfs_update_inode(trans, inode);
		if (ret)
			break;

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1, NULL);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, false);
		/*
		 * We have reserved 2 metadata units when we started the
		 * transaction and min_size matches 1 unit, so this should never
		 * fail, but if it does, it's not critical we just fail truncation.
		 */
		if (WARN_ON(ret))
			break;

		trans->block_rsv = rsv;
	}

	/*
	 * We can't call btrfs_truncate_block inside a trans handle as we could
	 * deadlock with freeze, if we got BTRFS_NEED_TRUNCATE_BLOCK then we
	 * know we've truncated everything except the last little bit, and can
	 * do btrfs_truncate_block and then update the disk_i_size.
	 */
	if (ret == BTRFS_NEED_TRUNCATE_BLOCK) {
		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		ret = btrfs_truncate_block(inode, inode->vfs_inode.i_size, 0, 0);
		if (ret)
			goto out;
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			goto out;
		}
		btrfs_inode_safe_disk_i_size_write(inode, 0);
	}

	if (trans) {
		int ret2;

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret2 = btrfs_update_inode(trans, inode);
		if (ret2 && !ret)
			ret = ret2;

		ret2 = btrfs_end_transaction(trans);
		if (ret2 && !ret)
			ret = ret2;
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);
	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode.  So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 *
	 * If no extents were dropped or trimmed we don't need to force the next
	 * fsync to truncate all the inode's items from the log and re-log them
	 * all.  This means the truncate operation did not change the file size,
	 * or changed it to a smaller size but there was only an implicit hole
	 * between the old i_size and the new i_size, and there were no prealloc
	 * extents beyond i_size to drop.
	 */
	if (control.extents_found > 0)
		btrfs_set_inode_full_sync(inode);

	return ret;
}
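/*
 * Editor's note: the loop above alternates between two reservations, the
 * temporary 'rsv' while btrfs_truncate_inode_items() runs and
 * fs_info->trans_block_rsv for the inode update, refilling 'rsv' from the
 * transaction reservation on every -ENOSPC/-EAGAIN restart.
 */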
struct inode *btrfs_new_subvol_inode(struct mnt_idmap *idmap,
				     struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		/*
		 * Subvolumes don't inherit the sgid bit or the parent's gid if
		 * the parent's sgid bit is set.  This is probably a bug.
		 */
		inode_init_owner(idmap, inode, NULL,
				 S_IFDIR | (~current_umask() & S_IRWXUGO));
		inode->i_op = &btrfs_dir_inode_operations;
		inode->i_fop = &btrfs_dir_file_operations;
	}
	return inode;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(sb);
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = alloc_inode_sb(sb, btrfs_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->new_delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->ro_flags = 0;
	/*
	 * ->index_cnt will be properly initialized later when creating a new
	 * inode (btrfs_create_new_inode()) or when reading an existing inode
	 * from disk (btrfs_read_locked_inode()).
	 */
	ei->csum_bytes = 0;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_reflink_trans = 0;
	ei->last_log_commit = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	if (sb->s_magic != BTRFS_TEST_MAGIC)
		btrfs_init_metadata_block_rsv(fs_info, &ei->block_rsv,
					      BTRFS_BLOCK_RSV_DELALLOC);
	ei->runtime_flags = 0;
	ei->prop_compress = BTRFS_COMPRESS_NONE;
	ei->defrag_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime_sec = 0;
	ei->i_otime_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);

	/* This io tree sets the valid inode. */
	extent_io_tree_init(fs_info, &ei->io_tree, IO_TREE_INODE_IO);
	ei->io_tree.inode = ei;

	ei->file_extent_tree = NULL;

	mutex_init(&ei->log_mutex);
	spin_lock_init(&ei->ordered_tree_lock);
	ei->ordered_tree = RB_ROOT;
	ei->ordered_tree_last = NULL;
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	init_rwsem(&ei->i_mmap_lock);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_map_range(BTRFS_I(inode), 0, (u64)-1, false);
	kfree(BTRFS_I(inode)->file_extent_tree);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif

void btrfs_free_inode(struct inode *inode)
{
	kfree(BTRFS_I(inode)->file_extent_tree);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}

void btrfs_destroy_inode(struct inode *vfs_inode)
{
	struct btrfs_ordered_extent *ordered;
	struct btrfs_inode *inode = BTRFS_I(vfs_inode);
	struct btrfs_root *root = inode->root;
	bool freespace_inode;

	WARN_ON(!hlist_empty(&vfs_inode->i_dentry));
	WARN_ON(vfs_inode->i_data.nrpages);
	WARN_ON(inode->block_rsv.reserved);
	WARN_ON(inode->block_rsv.size);
	WARN_ON(inode->outstanding_extents);
	if (!S_ISDIR(vfs_inode->i_mode)) {
		WARN_ON(inode->delalloc_bytes);
		WARN_ON(inode->new_delalloc_bytes);
		WARN_ON(inode->csum_bytes);
	}
	if (!root || !btrfs_is_data_reloc_root(root))
		WARN_ON(inode->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		return;

	/*
	 * If this is a free space inode do not take the ordered extents lockdep
	 * map.
	 */
	freespace_inode = btrfs_is_free_space_inode(inode);

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		else {
			btrfs_err(root->fs_info,
				  "found ordered extent %llu %llu on inode cleanup",
				  ordered->file_offset, ordered->num_bytes);

			if (!freespace_inode)
				btrfs_lockdep_acquire(root->fs_info, btrfs_ordered_extent);

			btrfs_remove_ordered_extent(inode, ordered);
			btrfs_put_ordered_extent(ordered);
			btrfs_put_ordered_extent(ordered);
		}
	}
	btrfs_qgroup_check_reserved_leak(inode);
	btrfs_del_inode_from_root(inode);
	btrfs_drop_extent_map_range(inode, 0, (u64)-1, false);
	btrfs_inode_clear_file_extent_range(inode, 0, (u64)-1);
	btrfs_put_root(inode->root);
}
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
	struct btrfs_inode *ei = foo;

	inode_init_once(&ei->vfs_inode);
}

void __cold btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(btrfs_inode_cachep);
}

int __init btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
					       sizeof(struct btrfs_inode), 0,
					       SLAB_RECLAIM_ACCOUNT | SLAB_ACCOUNT,
					       init_once);
	if (!btrfs_inode_cachep)
		return -ENOMEM;

	return 0;
}
static int btrfs_getattr(struct mnt_idmap *idmap,
			 const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	u64 inode_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = btrfs_sb(inode->i_sb)->sectorsize;
	u32 bi_flags = BTRFS_I(inode)->flags;
	u32 bi_ro_flags = BTRFS_I(inode)->ro_flags;

	stat->result_mask |= STATX_BTIME;
	stat->btime.tv_sec = BTRFS_I(inode)->i_otime_sec;
	stat->btime.tv_nsec = BTRFS_I(inode)->i_otime_nsec;
	if (bi_flags & BTRFS_INODE_APPEND)
		stat->attributes |= STATX_ATTR_APPEND;
	if (bi_flags & BTRFS_INODE_COMPRESS)
		stat->attributes |= STATX_ATTR_COMPRESSED;
	if (bi_flags & BTRFS_INODE_IMMUTABLE)
		stat->attributes |= STATX_ATTR_IMMUTABLE;
	if (bi_flags & BTRFS_INODE_NODUMP)
		stat->attributes |= STATX_ATTR_NODUMP;
	if (bi_ro_flags & BTRFS_INODE_RO_VERITY)
		stat->attributes |= STATX_ATTR_VERITY;

	stat->attributes_mask |= (STATX_ATTR_APPEND |
				  STATX_ATTR_COMPRESSED |
				  STATX_ATTR_IMMUTABLE |
				  STATX_ATTR_NODUMP);

	generic_fillattr(idmap, request_mask, inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	stat->subvol = BTRFS_I(inode)->root->root_key.objectid;
	stat->result_mask |= STATX_SUBVOL;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->new_delalloc_bytes;
	inode_bytes = inode_get_bytes(inode);
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_bytes, blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> SECTOR_SHIFT;
	return 0;
}
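/*
 * Worked example (editor's addition): with a 4K sectorsize, a file with 8192
 * bytes on disk and 4096 bytes of not-yet-flushed delalloc reports
 * stat->blocks = (8192 + 4096) >> 9 = 24, so st_blocks reflects dirty data
 * that has not been allocated on disk yet.
 */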
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct btrfs_rename_ctx old_rename_ctx;
	struct btrfs_rename_ctx new_rename_ctx;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	int ret;
	int ret2;
	bool need_abort = false;
	struct fscrypt_name old_fname, new_fname;
	struct fscrypt_str *old_name, *new_name;

	/*
	 * For non-subvolumes allow exchange only within one subvolume, in the
	 * same inode namespace.  Two subvolumes (represented as directory) can
	 * be exchanged as they're a logical link and have a fixed inode number.
	 */
	if (root != dest &&
	    (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
	     new_ino != BTRFS_FIRST_FREE_OBJECTID))
		return -EXDEV;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	old_name = &old_fname.disk_name;
	new_name = &new_fname.disk_name;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * For each inode:
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 * 1 to update parent inode
	 *
	 * If the parents are the same, we only need to account for one
	 */
	trans_num_items = (old_dir == new_dir ? 9 : 10);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode item
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		trans_num_items += 4;
	else
		trans_num_items += 3;
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, new_name, old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
		need_abort = true;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, root, old_name, new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret) {
			if (need_abort)
				btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), true);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_name, &old_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_name, &new_rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(new_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_name, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_name, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	/*
	 * Now pin the logs of the roots.  We do it to ensure that no other task
	 * can sync the logs while we are in progress with the rename, because
	 * that could result in an inconsistency in case any of the inodes that
	 * are part of this rename operation were logged before.
	 */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_pin_log_trans(dest);

	/* Do the log updates for all inodes. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   old_rename_ctx.index, new_dentry->d_parent);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, new_dentry, BTRFS_I(new_dir),
				   new_rename_ctx.index, old_dentry->d_parent);

	/* Now unpin the logs. */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(root);
	if (new_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_end_log_trans(dest);
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID ||
	    old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	fscrypt_free_filename(&new_fname);
	fscrypt_free_filename(&old_fname);
	return ret;
}
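/*
 * Worked example (editor's addition): exchanging two regular files that share
 * the same parent directory reserves 9 (dir items, dir indexes and one parent
 * update) + 3 (source inode) + 3 (destination inode) = 15 transaction units.
 */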
static struct inode *new_whiteout_inode(struct mnt_idmap *idmap,
					struct inode *dir)
{
	struct inode *inode;

	inode = new_inode(dir->i_sb);
	if (inode) {
		inode_init_owner(idmap, inode, dir,
				 S_IFCHR | WHITEOUT_MODE);
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, WHITEOUT_DEV);
	}
	return inode;
}
static int btrfs_rename(struct mnt_idmap *idmap,
			struct inode *old_dir, struct dentry *old_dentry,
			struct inode *new_dir, struct dentry *new_dentry,
			unsigned int flags)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(old_dir);
	struct btrfs_new_inode_args whiteout_args = {
		.dir = old_dir,
		.dentry = old_dentry,
	};
	struct btrfs_trans_handle *trans;
	unsigned int trans_num_items;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = d_inode(new_dentry);
	struct inode *old_inode = d_inode(old_dentry);
	struct btrfs_rename_ctx rename_ctx;
	u64 index = 0;
	int ret;
	int ret2;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	struct fscrypt_name old_fname, new_fname;

	if (btrfs_ino(BTRFS_I(new_dir)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
		return -EPERM;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID ||
	    (new_inode && btrfs_ino(BTRFS_I(new_inode)) == BTRFS_FIRST_FREE_OBJECTID))
		return -ENOTEMPTY;

	if (S_ISDIR(old_inode->i_mode) && new_inode &&
	    new_inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;

	ret = fscrypt_setup_filename(old_dir, &old_dentry->d_name, 0, &old_fname);
	if (ret)
		return ret;

	ret = fscrypt_setup_filename(new_dir, &new_dentry->d_name, 0, &new_fname);
	if (ret) {
		fscrypt_free_filename(&old_fname);
		return ret;
	}

	/* check for collisions, even if the name isn't there */
	ret = btrfs_check_dir_item_collision(dest, new_dir->i_ino, &new_fname.disk_name);
	if (ret) {
		if (ret == -EEXIST) {
			/* we shouldn't get
			 * eexist without a new_inode */
			if (WARN_ON(!new_inode)) {
				goto out_fscrypt_names;
			}
		} else {
			/* maybe -EOVERFLOW */
			goto out_fscrypt_names;
		}
	}
	ret = 0;

	/*
	 * we're using rename to replace one file with another. Start IO on it
	 * now so we don't add too much work to the end of the transaction
	 */
	if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
		filemap_flush(old_inode->i_mapping);

	if (flags & RENAME_WHITEOUT) {
		whiteout_args.inode = new_whiteout_inode(idmap, old_dir);
		if (!whiteout_args.inode) {
			ret = -ENOMEM;
			goto out_fscrypt_names;
		}
		ret = btrfs_new_inode_prepare(&whiteout_args, &trans_num_items);
		if (ret)
			goto out_whiteout_inode;
	} else {
		/* 1 to update the old parent inode. */
		trans_num_items = 1;
	}

	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* Close the race window with snapshot create/destroy ioctl */
		down_read(&fs_info->subvol_sem);
		/*
		 * 1 to remove old root ref
		 * 1 to remove old root backref
		 * 1 to add new root ref
		 * 1 to add new root backref
		 */
		trans_num_items += 4;
	} else {
		/*
		 * 1 to update inode
		 * 1 to remove old inode ref
		 * 1 to add new inode ref
		 */
		trans_num_items += 3;
	}
	/*
	 * 1 to remove old dir item
	 * 1 to remove old dir index
	 * 1 to add new dir item
	 * 1 to add new dir index
	 */
	trans_num_items += 4;
	/* 1 to update new parent inode if it's not the same as the old parent */
	if (new_dir != old_dir)
		trans_num_items++;
	if (new_inode) {
		/*
		 * 1 to update inode
		 * 1 to remove inode ref
		 * 1 to remove dir item
		 * 1 to remove dir index
		 * 1 to possibly add orphan item
		 */
		trans_num_items += 5;
	}
	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	if (dest != root) {
		ret = btrfs_record_root_in_trans(trans, dest);
		if (ret)
			goto out_fail;
	}

	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &index);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(trans);
	} else {
		ret = btrfs_insert_inode_ref(trans, dest, &new_fname.disk_name,
					     old_ino, btrfs_ino(BTRFS_I(new_dir)),
					     index);
		if (ret)
			goto out_fail;
	}

	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	simple_rename_timestamp(old_dir, old_dentry, new_dir, new_dentry);

	if (old_dentry->d_parent != new_dentry->d_parent)
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), true);

	if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_unlink_subvol(trans, BTRFS_I(old_dir), old_dentry);
	} else {
		ret = __btrfs_unlink_inode(trans, BTRFS_I(old_dir),
					   BTRFS_I(d_inode(old_dentry)),
					   &old_fname.disk_name, &rename_ctx);
		if (!ret)
			ret = btrfs_update_inode(trans, BTRFS_I(old_inode));
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (new_inode) {
		inode_inc_iversion(new_inode);
		if (unlikely(btrfs_ino(BTRFS_I(new_inode)) ==
			     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
			ret = btrfs_unlink_subvol(trans, BTRFS_I(new_dir), new_dentry);
			BUG_ON(new_inode->i_nlink == 0);
		} else {
			ret = btrfs_unlink_inode(trans, BTRFS_I(new_dir),
						 BTRFS_I(d_inode(new_dentry)),
						 &new_fname.disk_name);
		}
		if (!ret && new_inode->i_nlink == 0)
			ret = btrfs_orphan_add(trans,
					       BTRFS_I(d_inode(new_dentry)));
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		}
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     &new_fname.disk_name, 0, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = index;

	if (old_ino != BTRFS_FIRST_FREE_OBJECTID)
		btrfs_log_new_name(trans, old_dentry, BTRFS_I(old_dir),
				   rename_ctx.index, new_dentry->d_parent);

	if (flags & RENAME_WHITEOUT) {
		ret = btrfs_create_new_inode(trans, &whiteout_args);
		if (ret) {
			btrfs_abort_transaction(trans, ret);
			goto out_fail;
		} else {
			unlock_new_inode(whiteout_args.inode);
			iput(whiteout_args.inode);
			whiteout_args.inode = NULL;
		}
	}
out_fail:
	ret2 = btrfs_end_transaction(trans);
	ret = ret ? ret : ret2;
out_notrans:
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (flags & RENAME_WHITEOUT)
		btrfs_new_inode_args_destroy(&whiteout_args);
out_whiteout_inode:
	if (flags & RENAME_WHITEOUT)
		iput(whiteout_args.inode);
out_fscrypt_names:
	fscrypt_free_filename(&old_fname);
	fscrypt_free_filename(&new_fname);
	return ret;
}
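/*
 * A rough worked example of the trans_num_items accounting in btrfs_rename():
 * renaming a regular file to a new name in a different directory, with no
 * whiteout and no existing target, reserves 1 (old parent update) + 3 (inode
 * update, old and new inode refs) + 4 (old/new dir items and indexes) + 1
 * (new parent update) = 9 items before the transaction is started.
 */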
static int btrfs_rename2(struct mnt_idmap *idmap, struct inode *old_dir,
			 struct dentry *old_dentry, struct inode *new_dir,
			 struct dentry *new_dentry, unsigned int flags)
{
	int ret;

	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		ret = btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					    new_dentry);
	else
		ret = btrfs_rename(idmap, old_dir, old_dentry, new_dir,
				   new_dentry, flags);

	btrfs_btree_balance_dirty(BTRFS_I(new_dir)->root->fs_info);

	return ret;
}
struct btrfs_delalloc_work {
	struct inode *inode;
	struct completion completion;
	struct list_head list;
	struct btrfs_work work;
};
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	iput(inode);
	complete(&delalloc_work->completion);
}
static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	btrfs_init_work(&work->work, btrfs_run_delalloc_work, NULL);

	return work;
}
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int start_delalloc_inodes(struct btrfs_root *root,
				 struct writeback_control *wbc, bool snapshot,
				 bool in_reclaim_context)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	LIST_HEAD(works);
	LIST_HEAD(splice);
	int ret = 0;
	bool full_flush = wbc->nr_to_write == LONG_MAX;

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);

		if (in_reclaim_context &&
		    test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
			continue;

		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		if (snapshot)
			set_bit(BTRFS_INODE_SNAPSHOT_FLUSH,
				&binode->runtime_flags);
		if (full_flush) {
			work = btrfs_alloc_delalloc_work(inode);
			if (!work) {
				iput(inode);
				ret = -ENOMEM;
				goto out;
			}
			list_add_tail(&work->list, &works);
			btrfs_queue_work(root->fs_info->flush_workers,
					 &work->work);
		} else {
			ret = filemap_fdatawrite_wbc(inode->i_mapping, wbc);
			btrfs_add_delayed_iput(BTRFS_I(inode));
			if (ret || wbc->nr_to_write <= 0)
				goto out;
		}
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		wait_for_completion(&work->completion);
		kfree(work);
	}

	if (!list_empty(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
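/*
 * The list_splice_init() + list_move_tail() pattern above lets the walk drop
 * root->delalloc_lock between inodes: each visited entry is moved back onto
 * root->delalloc_inodes immediately, and whatever is still sitting on the
 * local splice list when we bail out early is re-attached wholesale at the
 * end, so no inode is ever lost from the delalloc tracking.
 */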
int btrfs_start_delalloc_snapshot(struct btrfs_root *root, bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = LONG_MAX,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_fs_info *fs_info = root->fs_info;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	return start_delalloc_inodes(root, &wbc, true, in_reclaim_context);
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, long nr,
			       bool in_reclaim_context)
{
	struct writeback_control wbc = {
		.nr_to_write = nr,
		.sync_mode = WB_SYNC_NONE,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	struct btrfs_root *root;
	LIST_HEAD(splice);
	int ret;

	if (BTRFS_FS_ERROR(fs_info))
		return -EROFS;

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Reset nr_to_write here so we know that we're doing a full
		 * flush.
		 */
		if (nr == LONG_MAX)
			wbc.nr_to_write = LONG_MAX;

		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = start_delalloc_inodes(root, &wbc, false, in_reclaim_context);
		btrfs_put_root(root);
		if (ret < 0 || wbc.nr_to_write <= 0)
			goto out;
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
out:
	if (!list_empty(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
static int btrfs_symlink(struct mnt_idmap *idmap, struct inode *dir,
			 struct dentry *dentry, const char *symname)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = dentry,
	};
	unsigned int trans_num_items;
	int err;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, S_IFLNK | S_IRWXUGO);
	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_aops;
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	inode_set_bytes(inode, name_len);

	new_inode_args.inode = inode;
	err = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (err)
		goto out_inode;
	/* 1 additional item for the inline extent */
	trans_num_items++;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	err = btrfs_create_new_inode(trans, &new_inode_args);
	if (err)
		goto out;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		btrfs_abort_transaction(trans, err);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_abort_transaction(trans, err);
		btrfs_free_path(path);
		discard_new_inode(inode);
		inode = NULL;
		goto out;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(trans, leaf);
	btrfs_free_path(path);

	d_instantiate_new(dentry, inode);
	err = 0;
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (err)
		iput(inode);
	return err;
}
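/*
 * The symlink target above is stored entirely as an inline file extent in the
 * subvolume tree, which is why its length is capped by
 * BTRFS_MAX_INLINE_DATA_SIZE(fs_info) (roughly one leaf item's worth of data)
 * and why no separate data extent is ever allocated for a symlink.
 */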
static struct btrfs_trans_handle *insert_prealloc_file_extent(
				struct btrfs_trans_handle *trans_in,
				struct btrfs_inode *inode,
				struct btrfs_key *ins,
				u64 file_offset)
{
	struct btrfs_file_extent_item stack_fi;
	struct btrfs_replace_extent_info extent_info;
	struct btrfs_trans_handle *trans = trans_in;
	struct btrfs_path *path;
	u64 start = ins->objectid;
	u64 len = ins->offset;
	u64 qgroup_released = 0;
	int ret;

	memset(&stack_fi, 0, sizeof(stack_fi));

	btrfs_set_stack_file_extent_type(&stack_fi, BTRFS_FILE_EXTENT_PREALLOC);
	btrfs_set_stack_file_extent_disk_bytenr(&stack_fi, start);
	btrfs_set_stack_file_extent_disk_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_num_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_ram_bytes(&stack_fi, len);
	btrfs_set_stack_file_extent_compression(&stack_fi, BTRFS_COMPRESS_NONE);
	/* Encryption and other encoding is reserved and all 0 */

	ret = btrfs_qgroup_release_data(inode, file_offset, len, &qgroup_released);
	if (ret < 0)
		return ERR_PTR(ret);

	if (trans) {
		ret = insert_reserved_file_extent(trans, inode,
						  file_offset, &stack_fi,
						  true, qgroup_released);
		if (ret)
			goto free_qgroup;
		return trans;
	}

	extent_info.disk_offset = start;
	extent_info.disk_len = len;
	extent_info.data_offset = 0;
	extent_info.data_len = len;
	extent_info.file_offset = file_offset;
	extent_info.extent_buf = (char *)&stack_fi;
	extent_info.is_new_extent = true;
	extent_info.update_times = true;
	extent_info.qgroup_reserved = qgroup_released;
	extent_info.insertions = 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto free_qgroup;
	}

	ret = btrfs_replace_file_extents(inode, path, file_offset,
					 file_offset + len - 1, &extent_info,
					 &trans);
	btrfs_free_path(path);
	if (ret)
		goto free_qgroup;
	return trans;

free_qgroup:
	/*
	 * We have released qgroup data range at the beginning of the function,
	 * and normally qgroup_released bytes will be freed when committing
	 * transaction.
	 * But if we error out early, we have to free what we have released
	 * or we leak qgroup data reservation.
	 */
	btrfs_qgroup_free_refroot(inode->root->fs_info,
				  btrfs_root_id(inode->root), qgroup_released,
				  BTRFS_QGROUP_RSV_DATA);
	return ERR_PTR(ret);
}
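/*
 * insert_prealloc_file_extent() runs in two modes: with a non-NULL trans_in
 * it inserts the reserved extent directly into the caller's transaction,
 * while with a NULL trans_in it goes through btrfs_replace_file_extents(),
 * which starts a transaction of its own and hands it back. In both cases the
 * caller owns the returned handle.
 */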
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(inode);
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 clear_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
					   min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret)
			break;

		/*
		 * We've reserved this space, and thus converted it from
		 * ->bytes_may_use to ->bytes_reserved. Any error that happens
		 * from here on out we will only need to clear our reservation
		 * for the remaining unreserved area, so advance our
		 * clear_offset by our extent size.
		 */
		clear_offset += ins.offset;

		last_alloc = ins.offset;
		trans = insert_prealloc_file_extent(trans, BTRFS_I(inode),
						    &ins, cur_offset);
		/*
		 * Now that we inserted the prealloc extent we can finally
		 * decrement the number of reservations in the block group.
		 * If we did it before, we could race with relocation and have
		 * relocation miss the reserved extent, making it fail later.
		 */
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			break;
		}

		em = alloc_extent_map();
		if (!em) {
			btrfs_drop_extent_map_range(BTRFS_I(inode), cur_offset,
					cur_offset + ins.offset - 1, false);
			btrfs_set_inode_full_sync(BTRFS_I(inode));
			goto next;
		}

		em->start = cur_offset;
		em->len = ins.offset;
		em->disk_bytenr = ins.objectid;
		em->offset = 0;
		em->disk_num_bytes = ins.offset;
		em->ram_bytes = ins.offset;
		em->flags |= EXTENT_FLAG_PREALLOC;
		em->generation = trans->transid;

		ret = btrfs_replace_extent_map_range(BTRFS_I(inode), em, true);
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode_set_ctime_current(inode);
		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_inode_safe_disk_i_size_write(BTRFS_I(inode), 0);
		}

		ret = btrfs_update_inode(trans, BTRFS_I(inode));

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans) {
			btrfs_end_transaction(trans);
			trans = NULL;
		}
	}
	if (clear_offset < end)
		btrfs_free_reserved_data_space(BTRFS_I(inode), NULL, clear_offset,
					       end - clear_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint, trans);
}
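/*
 * A minimal usage sketch (a hypothetical call site, not taken from an actual
 * caller): preallocating the first 4MiB of a file without an existing
 * transaction would look roughly like
 *
 *	u64 alloc_hint = 0;
 *	int ret = btrfs_prealloc_file_range(inode, 0, 0, SZ_4M, SZ_4M, SZ_4M,
 *					    &alloc_hint);
 *
 * Using the wrapper without a transaction makes __btrfs_prealloc_file_range()
 * start and end its own transaction for each extent it allocates.
 */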
static int btrfs_permission(struct mnt_idmap *idmap,
			    struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(idmap, inode, mask);
}
static int btrfs_tmpfile(struct mnt_idmap *idmap, struct inode *dir,
			 struct file *file, umode_t mode)
{
	struct btrfs_fs_info *fs_info = inode_to_fs_info(dir);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode;
	struct btrfs_new_inode_args new_inode_args = {
		.dir = dir,
		.dentry = file->f_path.dentry,
		.orphan = true,
	};
	unsigned int trans_num_items;
	int ret;

	inode = new_inode(dir->i_sb);
	if (!inode)
		return -ENOMEM;
	inode_init_owner(idmap, inode, dir, mode);
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	new_inode_args.inode = inode;
	ret = btrfs_new_inode_prepare(&new_inode_args, &trans_num_items);
	if (ret)
		goto out_inode;

	trans = btrfs_start_transaction(root, trans_num_items);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_new_inode_args;
	}

	ret = btrfs_create_new_inode(trans, &new_inode_args);

	/*
	 * We set number of links to 0 in btrfs_create_new_inode(), and here we
	 * set it to 1 because d_tmpfile() will issue a warning if the count is
	 * 0, through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);

	if (!ret) {
		d_tmpfile(file, inode);
		unlock_new_inode(inode);
		mark_inode_dirty(inode);
	}

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
out_new_inode_args:
	btrfs_new_inode_args_destroy(&new_inode_args);
out_inode:
	if (ret)
		iput(inode);
	return finish_open_simple(file, ret);
}
int btrfs_encoded_io_compression_from_extent(struct btrfs_fs_info *fs_info,
					     int compress_type)
{
	switch (compress_type) {
	case BTRFS_COMPRESS_NONE:
		return BTRFS_ENCODED_IO_COMPRESSION_NONE;
	case BTRFS_COMPRESS_ZLIB:
		return BTRFS_ENCODED_IO_COMPRESSION_ZLIB;
	case BTRFS_COMPRESS_LZO:
		/*
		 * The LZO format depends on the sector size. 64K is the maximum
		 * sector size that we support.
		 */
		if (fs_info->sectorsize < SZ_4K || fs_info->sectorsize > SZ_64K)
			return -EINVAL;
		return BTRFS_ENCODED_IO_COMPRESSION_LZO_4K +
		       (fs_info->sectorsize_bits - 12);
	case BTRFS_COMPRESS_ZSTD:
		return BTRFS_ENCODED_IO_COMPRESSION_ZSTD;
	default:
		return -EUCLEAN;
	}
}
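/*
 * Worked example for the LZO mapping above: with a 4K sector size,
 * sectorsize_bits is 12 and the result is
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 0; with a 64K sector size,
 * sectorsize_bits is 16 and the result is
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 4, i.e.
 * BTRFS_ENCODED_IO_COMPRESSION_LZO_64K.
 */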
static ssize_t btrfs_encoded_read_inline(
				struct kiocb *iocb,
				struct iov_iter *iter, u64 start,
				u64 lockend,
				struct extent_state **cached_state,
				u64 extent_start, size_t count,
				struct btrfs_ioctl_encoded_io_args *encoded,
				bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *item;
	u64 ram_bytes;
	unsigned long ptr;
	void *tmp;
	ssize_t ret;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->nowait = nowait;

	ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(inode),
				       extent_start, 0);
	if (ret) {
		if (ret > 0) {
			/* The extent item disappeared? */
			ret = -EIO;
		}
		goto out;
	}
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_file_extent_item);

	ram_bytes = btrfs_file_extent_ram_bytes(leaf, item);
	ptr = btrfs_file_extent_inline_start(item);

	encoded->len = min_t(u64, extent_start + ram_bytes,
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	ret = btrfs_encoded_io_compression_from_extent(fs_info,
				btrfs_file_extent_compression(leaf, item));
	if (ret < 0)
		goto out;
	encoded->compression = ret;
	if (encoded->compression) {
		size_t inline_size;

		inline_size = btrfs_file_extent_inline_item_len(leaf,
								path->slots[0]);
		if (inline_size > count) {
			ret = -ENOBUFS;
			goto out;
		}
		count = inline_size;
		encoded->unencoded_len = ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - extent_start;
	} else {
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
		ptr += iocb->ki_pos - extent_start;
	}

	tmp = kmalloc(count, GFP_NOFS);
	if (!tmp) {
		ret = -ENOMEM;
		goto out;
	}
	read_extent_buffer(leaf, tmp, ptr, count);
	btrfs_release_path(path);
	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	ret = copy_to_iter(tmp, count, iter);
	if (ret != count)
		ret = -EFAULT;
	kfree(tmp);
out:
	btrfs_free_path(path);
	return ret;
}
struct btrfs_encoded_read_private {
	wait_queue_head_t wait;
	void *uring_ctx;
	atomic_t pending;
	blk_status_t status;
};
static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{
	struct btrfs_encoded_read_private *priv = bbio->private;

	if (bbio->bio.bi_status) {
		/*
		 * The memory barrier implied by the atomic_dec_return() here
		 * pairs with the memory barrier implied by the
		 * atomic_dec_return() or io_wait_event() in
		 * btrfs_encoded_read_regular_fill_pages() to ensure that this
		 * write is observed before the load of status in
		 * btrfs_encoded_read_regular_fill_pages().
		 */
		WRITE_ONCE(priv->status, bbio->bio.bi_status);
	}
	if (atomic_dec_and_test(&priv->pending)) {
		int err = blk_status_to_errno(READ_ONCE(priv->status));

		if (priv->uring_ctx) {
			btrfs_uring_read_extent_endio(priv->uring_ctx, err);
			kfree(priv);
		} else {
			wake_up(&priv->wait);
		}
	}
	bio_put(&bbio->bio);
}
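/*
 * btrfs_encoded_read_regular_fill_pages() below initializes priv->pending to
 * 1 rather than 0: that extra "bias" reference keeps the completion in
 * btrfs_encoded_read_endio() from firing while bios are still being built,
 * and is only dropped once every bio has been submitted.
 */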
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
					  u64 disk_bytenr, u64 disk_io_size,
					  struct page **pages, void *uring_ctx)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_encoded_read_private *priv;
	unsigned long i = 0;
	struct btrfs_bio *bbio;
	int ret;

	priv = kmalloc(sizeof(struct btrfs_encoded_read_private), GFP_NOFS);
	if (!priv)
		return -ENOMEM;

	init_waitqueue_head(&priv->wait);
	atomic_set(&priv->pending, 1);
	priv->status = 0;
	priv->uring_ctx = uring_ctx;

	bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
			       btrfs_encoded_read_endio, priv);
	bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
	bbio->inode = inode;

	do {
		size_t bytes = min_t(u64, disk_io_size, PAGE_SIZE);

		if (bio_add_page(&bbio->bio, pages[i], bytes, 0) < bytes) {
			atomic_inc(&priv->pending);
			btrfs_submit_bbio(bbio, 0);

			bbio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ, fs_info,
					       btrfs_encoded_read_endio, priv);
			bbio->bio.bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
			bbio->inode = inode;
			continue;
		}

		i++;
		disk_bytenr += bytes;
		disk_io_size -= bytes;
	} while (disk_io_size);

	atomic_inc(&priv->pending);
	btrfs_submit_bbio(bbio, 0);

	if (uring_ctx) {
		if (atomic_dec_return(&priv->pending) == 0) {
			ret = blk_status_to_errno(READ_ONCE(priv->status));
			btrfs_uring_read_extent_endio(uring_ctx, ret);
			kfree(priv);
			return ret;
		}

		return -EIOCBQUEUED;
	} else {
		if (atomic_dec_return(&priv->pending) != 0)
			io_wait_event(priv->wait, !atomic_read(&priv->pending));
		/* See btrfs_encoded_read_endio() for ordering. */
		ret = blk_status_to_errno(READ_ONCE(priv->status));
		kfree(priv);
		return ret;
	}
}
ssize_t btrfs_encoded_read_regular(struct kiocb *iocb, struct iov_iter *iter,
				   u64 start, u64 lockend,
				   struct extent_state **cached_state,
				   u64 disk_bytenr, u64 disk_io_size,
				   size_t count, bool compressed, bool *unlocked)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct page **pages;
	unsigned long nr_pages, i;
	u64 cur;
	size_t page_offset;
	ssize_t ret;

	nr_pages = DIV_ROUND_UP(disk_io_size, PAGE_SIZE);
	pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
	if (!pages)
		return -ENOMEM;
	ret = btrfs_alloc_page_array(nr_pages, pages, false);
	if (ret) {
		ret = -ENOMEM;
		goto out;
	}

	ret = btrfs_encoded_read_regular_fill_pages(inode, disk_bytenr,
						    disk_io_size, pages, NULL);
	if (ret)
		goto out;

	unlock_extent(io_tree, start, lockend, cached_state);
	btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	*unlocked = true;

	if (compressed) {
		i = 0;
		page_offset = 0;
	} else {
		i = (iocb->ki_pos - start) >> PAGE_SHIFT;
		page_offset = (iocb->ki_pos - start) & (PAGE_SIZE - 1);
	}
	cur = 0;
	while (cur < count) {
		size_t bytes = min_t(size_t, count - cur,
				     PAGE_SIZE - page_offset);

		if (copy_page_to_iter(pages[i], page_offset, bytes,
				      iter) != bytes) {
			ret = -EFAULT;
			goto out;
		}
		i++;
		cur += bytes;
		page_offset = 0;
	}
	ret = count;
out:
	for (i = 0; i < nr_pages; i++) {
		if (pages[i])
			__free_page(pages[i]);
	}
	kfree(pages);
	return ret;
}
ssize_t btrfs_encoded_read(struct kiocb *iocb, struct iov_iter *iter,
			   struct btrfs_ioctl_encoded_io_args *encoded,
			   struct extent_state **cached_state,
			   u64 *disk_bytenr, u64 *disk_io_size)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	ssize_t ret;
	size_t count = iov_iter_count(iter);
	u64 start, lockend;
	struct extent_map *em;
	const bool nowait = (iocb->ki_flags & IOCB_NOWAIT);
	bool unlocked = false;

	file_accessed(iocb->ki_filp);

	ret = btrfs_inode_lock(inode,
			       BTRFS_ILOCK_SHARED | (nowait ? BTRFS_ILOCK_TRY : 0));
	if (ret)
		return ret;

	if (iocb->ki_pos >= inode->vfs_inode.i_size) {
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		return 0;
	}
	start = ALIGN_DOWN(iocb->ki_pos, fs_info->sectorsize);
	/*
	 * We don't know how long the extent containing iocb->ki_pos is, but if
	 * it's compressed we know that it won't be longer than this.
	 */
	lockend = start + BTRFS_MAX_UNCOMPRESSED - 1;

	if (nowait) {
		struct btrfs_ordered_extent *ordered;

		if (filemap_range_needs_writeback(inode->vfs_inode.i_mapping,
						  start, lockend)) {
			ret = -EAGAIN;
			goto out_unlock_inode;
		}

		if (!try_lock_extent(io_tree, start, lockend, cached_state)) {
			ret = -EAGAIN;
			goto out_unlock_inode;
		}

		ordered = btrfs_lookup_ordered_range(inode, start,
						     lockend - start + 1);
		if (ordered) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent(io_tree, start, lockend, cached_state);
			ret = -EAGAIN;
			goto out_unlock_inode;
		}
	} else {
		for (;;) {
			struct btrfs_ordered_extent *ordered;

			ret = btrfs_wait_ordered_range(inode, start,
						       lockend - start + 1);
			if (ret)
				goto out_unlock_inode;

			lock_extent(io_tree, start, lockend, cached_state);
			ordered = btrfs_lookup_ordered_range(inode, start,
							     lockend - start + 1);
			if (!ordered)
				break;
			btrfs_put_ordered_extent(ordered);
			unlock_extent(io_tree, start, lockend, cached_state);
			cond_resched();
		}
	}

	em = btrfs_get_extent(inode, NULL, start, lockend - start + 1);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_unlock_extent;
	}

	if (em->disk_bytenr == EXTENT_MAP_INLINE) {
		u64 extent_start = em->start;

		/*
		 * For inline extents we get everything we need out of the
		 * extent item.
		 */
		free_extent_map(em);
		em = NULL;
		ret = btrfs_encoded_read_inline(iocb, iter, start, lockend,
						cached_state, extent_start,
						count, encoded, &unlocked);
		goto out_unlock_extent;
	}

	/*
	 * We only want to return up to EOF even if the extent extends beyond
	 * that.
	 */
	encoded->len = min_t(u64, extent_map_end(em),
			     inode->vfs_inode.i_size) - iocb->ki_pos;
	if (em->disk_bytenr == EXTENT_MAP_HOLE ||
	    (em->flags & EXTENT_FLAG_PREALLOC)) {
		*disk_bytenr = EXTENT_MAP_HOLE;
		count = min_t(u64, count, encoded->len);
		encoded->len = count;
		encoded->unencoded_len = count;
	} else if (extent_map_is_compressed(em)) {
		*disk_bytenr = em->disk_bytenr;
		/*
		 * Bail if the buffer isn't large enough to return the whole
		 * compressed extent.
		 */
		if (em->disk_num_bytes > count) {
			ret = -ENOBUFS;
			goto out_em;
		}
		*disk_io_size = em->disk_num_bytes;
		count = em->disk_num_bytes;
		encoded->unencoded_len = em->ram_bytes;
		encoded->unencoded_offset = iocb->ki_pos - (em->start - em->offset);
		ret = btrfs_encoded_io_compression_from_extent(fs_info,
						extent_map_compression(em));
		if (ret < 0)
			goto out_em;
		encoded->compression = ret;
	} else {
		*disk_bytenr = extent_map_block_start(em) + (start - em->start);
		if (encoded->len > count)
			encoded->len = count;
		/*
		 * Don't read beyond what we locked. This also limits the page
		 * allocations that we'll do.
		 */
		*disk_io_size = min(lockend + 1, iocb->ki_pos + encoded->len) - start;
		count = start + *disk_io_size - iocb->ki_pos;
		encoded->len = count;
		encoded->unencoded_len = count;
		*disk_io_size = ALIGN(*disk_io_size, fs_info->sectorsize);
	}
	free_extent_map(em);
	em = NULL;

	if (*disk_bytenr == EXTENT_MAP_HOLE) {
		unlock_extent(io_tree, start, lockend, cached_state);
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
		unlocked = true;
		ret = iov_iter_zero(count, iter);
		if (ret != count)
			ret = -EFAULT;
	} else {
		ret = -EIOCBQUEUED;
		goto out_unlock_extent;
	}

out_em:
	free_extent_map(em);
out_unlock_extent:
	/* Leave inode and extent locked if we need to do a read. */
	if (!unlocked && ret != -EIOCBQUEUED)
		unlock_extent(io_tree, start, lockend, cached_state);
out_unlock_inode:
	if (!unlocked && ret != -EIOCBQUEUED)
		btrfs_inode_unlock(inode, BTRFS_ILOCK_SHARED);
	return ret;
}
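/*
 * A return value of -EIOCBQUEUED from btrfs_encoded_read() is not an error:
 * it tells the caller that *disk_bytenr and *disk_io_size describe a regular
 * extent that still has to be read, and that the inode lock and extent range
 * are intentionally left held for btrfs_encoded_read_regular() to consume.
 */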
ssize_t btrfs_do_encoded_write(struct kiocb *iocb, struct iov_iter *from,
			       const struct btrfs_ioctl_encoded_io_args *encoded)
{
	struct btrfs_inode *inode = BTRFS_I(file_inode(iocb->ki_filp));
	struct btrfs_root *root = inode->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct extent_changeset *data_reserved = NULL;
	struct extent_state *cached_state = NULL;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_file_extent file_extent;
	int compression;
	size_t orig_count;
	u64 start, end;
	u64 num_bytes, ram_bytes, disk_num_bytes;
	unsigned long nr_folios, i;
	struct folio **folios;
	struct btrfs_key ins;
	bool extent_reserved = false;
	struct extent_map *em;
	ssize_t ret;

	switch (encoded->compression) {
	case BTRFS_ENCODED_IO_COMPRESSION_ZLIB:
		compression = BTRFS_COMPRESS_ZLIB;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_ZSTD:
		compression = BTRFS_COMPRESS_ZSTD;
		break;
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_4K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_8K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_16K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_32K:
	case BTRFS_ENCODED_IO_COMPRESSION_LZO_64K:
		/* The sector size must match for LZO. */
		if (encoded->compression -
		    BTRFS_ENCODED_IO_COMPRESSION_LZO_4K + 12 !=
		    fs_info->sectorsize_bits)
			return -EINVAL;
		compression = BTRFS_COMPRESS_LZO;
		break;
	default:
		return -EINVAL;
	}
	if (encoded->encryption != BTRFS_ENCODED_IO_ENCRYPTION_NONE)
		return -EINVAL;

	/*
	 * Compressed extents should always have checksums, so error out if we
	 * have a NOCOW file or inode was created while mounted with NODATASUM.
	 */
	if (inode->flags & BTRFS_INODE_NODATASUM)
		return -EINVAL;

	orig_count = iov_iter_count(from);

	/* The extent size must be sane. */
	if (encoded->unencoded_len > BTRFS_MAX_UNCOMPRESSED ||
	    orig_count > BTRFS_MAX_COMPRESSED || orig_count == 0)
		return -EINVAL;

	/*
	 * The compressed data must be smaller than the decompressed data.
	 *
	 * It's of course possible for data to compress to larger or the same
	 * size, but the buffered I/O path falls back to no compression for such
	 * data, and we don't want to break any assumptions by creating these
	 * extents.
	 *
	 * Note that this is less strict than the current check we have that the
	 * compressed data must be at least one sector smaller than the
	 * decompressed data. We only want to enforce the weaker requirement
	 * from old kernels that it is at least one byte smaller.
	 */
	if (orig_count >= encoded->unencoded_len)
		return -EINVAL;

	/* The extent must start on a sector boundary. */
	start = iocb->ki_pos;
	if (!IS_ALIGNED(start, fs_info->sectorsize))
		return -EINVAL;

	/*
	 * The extent must end on a sector boundary. However, we allow a write
	 * which ends at or extends i_size to have an unaligned length; we round
	 * up the extent size and set i_size to the unaligned end.
	 */
	if (start + encoded->len < inode->vfs_inode.i_size &&
	    !IS_ALIGNED(start + encoded->len, fs_info->sectorsize))
		return -EINVAL;

	/* Finally, the offset in the unencoded data must be sector-aligned. */
	if (!IS_ALIGNED(encoded->unencoded_offset, fs_info->sectorsize))
		return -EINVAL;

	num_bytes = ALIGN(encoded->len, fs_info->sectorsize);
	ram_bytes = ALIGN(encoded->unencoded_len, fs_info->sectorsize);
	end = start + num_bytes - 1;

	/*
	 * If the extent cannot be inline, the compressed data on disk must be
	 * sector-aligned. For convenience, we extend it with zeroes if it
	 * isn't.
	 */
	disk_num_bytes = ALIGN(orig_count, fs_info->sectorsize);
	nr_folios = DIV_ROUND_UP(disk_num_bytes, PAGE_SIZE);
	folios = kvcalloc(nr_folios, sizeof(struct folio *), GFP_KERNEL_ACCOUNT);
	if (!folios)
		return -ENOMEM;
	for (i = 0; i < nr_folios; i++) {
		size_t bytes = min_t(size_t, PAGE_SIZE, iov_iter_count(from));
		char *kaddr;

		folios[i] = folio_alloc(GFP_KERNEL_ACCOUNT, 0);
		if (!folios[i]) {
			ret = -ENOMEM;
			goto out_folios;
		}
		kaddr = kmap_local_folio(folios[i], 0);
		if (copy_from_iter(kaddr, bytes, from) != bytes) {
			kunmap_local(kaddr);
			ret = -EFAULT;
			goto out_folios;
		}
		if (bytes < PAGE_SIZE)
			memset(kaddr + bytes, 0, PAGE_SIZE - bytes);
		kunmap_local(kaddr);
	}

	for (;;) {
		struct btrfs_ordered_extent *ordered;

		ret = btrfs_wait_ordered_range(inode, start, num_bytes);
		if (ret)
			goto out_folios;
		ret = invalidate_inode_pages2_range(inode->vfs_inode.i_mapping,
						    start >> PAGE_SHIFT,
						    end >> PAGE_SHIFT);
		if (ret)
			goto out_folios;
		lock_extent(io_tree, start, end, &cached_state);
		ordered = btrfs_lookup_ordered_range(inode, start, num_bytes);
		if (!ordered &&
		    !filemap_range_has_page(inode->vfs_inode.i_mapping, start, end))
			break;
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent(io_tree, start, end, &cached_state);
		cond_resched();
	}

	/*
	 * We don't use the higher-level delalloc space functions because our
	 * num_bytes and disk_num_bytes are different.
	 */
	ret = btrfs_alloc_data_chunk_ondemand(inode, disk_num_bytes);
	if (ret)
		goto out_unlock;
	ret = btrfs_qgroup_reserve_data(inode, &data_reserved, start, num_bytes);
	if (ret)
		goto out_free_data_space;
	ret = btrfs_delalloc_reserve_metadata(inode, num_bytes, disk_num_bytes,
					      false);
	if (ret)
		goto out_qgroup_free_data;

	/* Try an inline extent first. */
	if (encoded->unencoded_len == encoded->len &&
	    encoded->unencoded_offset == 0 &&
	    can_cow_file_range_inline(inode, start, encoded->len, orig_count)) {
		ret = __cow_file_range_inline(inode, encoded->len,
					      orig_count, compression, folios[0],
					      true);
		if (ret <= 0) {
			if (ret == 0)
				ret = orig_count;
			goto out_delalloc_release;
		}
	}

	ret = btrfs_reserve_extent(root, disk_num_bytes, disk_num_bytes,
				   disk_num_bytes, 0, 0, &ins, 1, 1);
	if (ret)
		goto out_delalloc_release;
	extent_reserved = true;

	file_extent.disk_bytenr = ins.objectid;
	file_extent.disk_num_bytes = ins.offset;
	file_extent.num_bytes = num_bytes;
	file_extent.ram_bytes = ram_bytes;
	file_extent.offset = encoded->unencoded_offset;
	file_extent.compression = compression;
	em = btrfs_create_io_em(inode, start, &file_extent, BTRFS_ORDERED_COMPRESSED);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto out_free_reserved;
	}
	free_extent_map(em);

	ordered = btrfs_alloc_ordered_extent(inode, start, &file_extent,
					     (1 << BTRFS_ORDERED_ENCODED) |
					     (1 << BTRFS_ORDERED_COMPRESSED));
	if (IS_ERR(ordered)) {
		btrfs_drop_extent_map_range(inode, start, end, false);
		ret = PTR_ERR(ordered);
		goto out_free_reserved;
	}
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);

	if (start + encoded->len > inode->vfs_inode.i_size)
		i_size_write(&inode->vfs_inode, start + encoded->len);

	unlock_extent(io_tree, start, end, &cached_state);

	btrfs_delalloc_release_extents(inode, num_bytes);

	btrfs_submit_compressed_write(ordered, folios, nr_folios, 0, false);
	ret = orig_count;
	goto out;

out_free_reserved:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_delalloc_release:
	btrfs_delalloc_release_extents(inode, num_bytes);
	btrfs_delalloc_release_metadata(inode, disk_num_bytes, ret < 0);
out_qgroup_free_data:
	if (ret < 0)
		btrfs_qgroup_free_data(inode, data_reserved, start, num_bytes, NULL);
out_free_data_space:
	/*
	 * If btrfs_reserve_extent() succeeded, then we already decremented
	 * bytes_may_use.
	 */
	if (!extent_reserved)
		btrfs_free_reserved_data_space_noquota(fs_info, disk_num_bytes);
out_unlock:
	unlock_extent(io_tree, start, end, &cached_state);
out_folios:
	for (i = 0; i < nr_folios; i++) {
		if (folios[i])
			folio_put(folios[i]);
	}
	kvfree(folios);
out:
	if (ret >= 0)
		iocb->ki_pos += encoded->len;
	return ret;
}
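/*
 * Note the two different sizes reserved in btrfs_do_encoded_write():
 * num_bytes (the aligned, decompressed file range) drives the qgroup and
 * metadata reservations, while disk_num_bytes (the aligned, compressed byte
 * count actually written) drives the data space reservation. This mismatch
 * is why the function reserves space by hand instead of using the usual
 * delalloc helpers.
 */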
#ifdef CONFIG_SWAP
/*
 * Add an entry indicating a block group or device which is pinned by a
 * swapfile. Returns 0 on success, 1 if there is already an entry for it, or a
 * negative errno on failure.
 */
static int btrfs_add_swapfile_pin(struct inode *inode, void *ptr,
				  bool is_block_group)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp, *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;

	sp = kmalloc(sizeof(*sp), GFP_NOFS);
	if (!sp)
		return -ENOMEM;
	sp->ptr = ptr;
	sp->inode = inode;
	sp->is_block_group = is_block_group;
	sp->bg_extent_count = 1;

	spin_lock(&fs_info->swapfile_pins_lock);
	p = &fs_info->swapfile_pins.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_swapfile_pin, node);
		if (sp->ptr < entry->ptr ||
		    (sp->ptr == entry->ptr && sp->inode < entry->inode)) {
			p = &(*p)->rb_left;
		} else if (sp->ptr > entry->ptr ||
			   (sp->ptr == entry->ptr && sp->inode > entry->inode)) {
			p = &(*p)->rb_right;
		} else {
			if (is_block_group)
				entry->bg_extent_count++;
			spin_unlock(&fs_info->swapfile_pins_lock);
			kfree(sp);
			return 1;
		}
	}
	rb_link_node(&sp->node, parent, p);
	rb_insert_color(&sp->node, &fs_info->swapfile_pins);
	spin_unlock(&fs_info->swapfile_pins_lock);
	return 0;
}
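/*
 * The rbtree above is keyed by (ptr, inode), so a given block group or device
 * is pinned at most once per swapfile. The "already present" return value of
 * 1 is expected during activation and is treated as success by the callers.
 */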
/* Free all of the entries pinned by this swapfile. */
static void btrfs_free_swapfile_pins(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
	struct btrfs_swapfile_pin *sp;
	struct rb_node *node, *next;

	spin_lock(&fs_info->swapfile_pins_lock);
	node = rb_first(&fs_info->swapfile_pins);
	while (node) {
		next = rb_next(node);
		sp = rb_entry(node, struct btrfs_swapfile_pin, node);
		if (sp->inode == inode) {
			rb_erase(&sp->node, &fs_info->swapfile_pins);
			if (sp->is_block_group) {
				btrfs_dec_block_group_swap_extents(sp->ptr,
							sp->bg_extent_count);
				btrfs_put_block_group(sp->ptr);
			}
			kfree(sp);
		}
		node = next;
	}
	spin_unlock(&fs_info->swapfile_pins_lock);
}
struct btrfs_swap_info {
	u64 start;
	u64 block_start;
	u64 block_len;
	u64 lowest_ppage;
	u64 highest_ppage;
	unsigned long nr_pages;
	int nr_extents;
};
static int btrfs_add_swap_extent(struct swap_info_struct *sis,
				 struct btrfs_swap_info *bsi)
{
	unsigned long nr_pages;
	unsigned long max_pages;
	u64 first_ppage, first_ppage_reported, next_ppage;
	int ret;

	/*
	 * Our swapfile may have had its size extended after the swap header was
	 * written. In that case activating the swapfile should not go beyond
	 * the max size set in the swap header.
	 */
	if (bsi->nr_pages >= sis->max)
		return 0;

	max_pages = sis->max - bsi->nr_pages;
	first_ppage = PAGE_ALIGN(bsi->block_start) >> PAGE_SHIFT;
	next_ppage = PAGE_ALIGN_DOWN(bsi->block_start + bsi->block_len) >> PAGE_SHIFT;

	if (first_ppage >= next_ppage)
		return 0;
	nr_pages = next_ppage - first_ppage;
	nr_pages = min(nr_pages, max_pages);

	first_ppage_reported = first_ppage;
	if (bsi->start == 0)
		first_ppage_reported++;
	if (bsi->lowest_ppage > first_ppage_reported)
		bsi->lowest_ppage = first_ppage_reported;
	if (bsi->highest_ppage < (next_ppage - 1))
		bsi->highest_ppage = next_ppage - 1;

	ret = add_swap_extent(sis, bsi->nr_pages, nr_pages, first_ppage);
	if (ret < 0)
		return ret;
	bsi->nr_extents += ret;
	bsi->nr_pages += nr_pages;
	return 0;
}
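/*
 * Worked example for the page rounding above, assuming 4K pages: a physical
 * run with block_start = 12288 and block_len = 8192 gives first_ppage =
 * PAGE_ALIGN(12288) >> PAGE_SHIFT = 3 and next_ppage =
 * PAGE_ALIGN_DOWN(20480) >> PAGE_SHIFT = 5, i.e. two whole swap pages. A run
 * shorter than one aligned page ends up with first_ppage >= next_ppage and
 * is skipped.
 */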
static void btrfs_swap_deactivate(struct file *file)
{
	struct inode *inode = file_inode(file);

	btrfs_free_swapfile_pins(inode);
	atomic_dec(&BTRFS_I(inode)->root->nr_swapfiles);
}
static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	struct inode *inode = file_inode(file);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct extent_map *em = NULL;
	struct btrfs_chunk_map *map = NULL;
	struct btrfs_device *device = NULL;
	struct btrfs_swap_info bsi = {
		.lowest_ppage = (sector_t)-1ULL,
	};
	int ret = 0;
	u64 isize;
	u64 start;

	/*
	 * If the swap file was just created, make sure delalloc is done. If the
	 * file changes again after this, the user is doing something stupid and
	 * we don't really care.
	 */
	ret = btrfs_wait_ordered_range(BTRFS_I(inode), 0, (u64)-1);
	if (ret)
		return ret;

	/*
	 * The inode is locked, so these flags won't change after we check them.
	 */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS) {
		btrfs_warn(fs_info, "swapfile must not be compressed");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW)) {
		btrfs_warn(fs_info, "swapfile must not be copy-on-write");
		return -EINVAL;
	}
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		btrfs_warn(fs_info, "swapfile must not be checksummed");
		return -EINVAL;
	}

	/*
	 * Balance or device remove/replace/resize can move stuff around from
	 * under us. The exclop protection makes sure they aren't running/won't
	 * run concurrently while we are mapping the swap extents, and
	 * fs_info->swapfile_pins prevents them from running while the swap
	 * file is active and moving the extents. Note that this also prevents
	 * a concurrent device add which isn't actually necessary, but it's not
	 * really worth the trouble to allow it.
	 */
	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_SWAP_ACTIVATE)) {
		btrfs_warn(fs_info,
	"cannot activate swapfile while exclusive operation is running");
		return -EBUSY;
	}

	/*
	 * Prevent snapshot creation while we are activating the swap file.
	 * We do not want to race with snapshot creation. If snapshot creation
	 * already started before we bumped nr_swapfiles from 0 to 1 and
	 * completes before the first write into the swap file after it is
	 * activated, than that write would fallback to COW.
	 */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock)) {
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	"cannot activate swapfile because snapshot creation is in progress");
		return -EINVAL;
	}
	/*
	 * Snapshots can create extents which require COW even if NODATACOW is
	 * set. We use this counter to prevent snapshots. We must increment it
	 * before walking the extents because we don't want a concurrent
	 * snapshot to run after we've already checked the extents.
	 *
	 * It is possible that subvolume is marked for deletion but still not
	 * removed yet. To prevent this race, we check the root status before
	 * activating the swapfile.
	 */
	spin_lock(&root->root_item_lock);
	if (btrfs_root_dead(root)) {
		spin_unlock(&root->root_item_lock);

		btrfs_drew_write_unlock(&root->snapshot_lock);
		btrfs_exclop_finish(fs_info);
		btrfs_warn(fs_info,
	"cannot activate swapfile because subvolume %llu is being deleted",
			   btrfs_root_id(root));
		return -EPERM;
	}
	atomic_inc(&root->nr_swapfiles);
	spin_unlock(&root->root_item_lock);

	isize = ALIGN_DOWN(inode->i_size, fs_info->sectorsize);

	lock_extent(io_tree, 0, isize - 1, &cached_state);
	start = 0;
	while (start < isize) {
		u64 logical_block_start, physical_block_start;
		struct btrfs_block_group *bg;
		u64 len = isize - start;

		em = btrfs_get_extent(BTRFS_I(inode), NULL, start, len);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}

		if (em->disk_bytenr == EXTENT_MAP_HOLE) {
			btrfs_warn(fs_info, "swapfile must not have holes");
			ret = -EINVAL;
			goto out;
		}
		if (em->disk_bytenr == EXTENT_MAP_INLINE) {
			/*
			 * It's unlikely we'll ever actually find ourselves
			 * here, as a file small enough to fit inline won't be
			 * big enough to store more than the swap header, but in
			 * case something changes in the future, let's catch it
			 * here rather than later.
			 */
			btrfs_warn(fs_info, "swapfile must not be inline");
			ret = -EINVAL;
			goto out;
		}
		if (extent_map_is_compressed(em)) {
			btrfs_warn(fs_info, "swapfile must not be compressed");
			ret = -EINVAL;
			goto out;
		}

		logical_block_start = extent_map_block_start(em) + (start - em->start);
		len = min(len, em->len - (start - em->start));
		free_extent_map(em);
		em = NULL;

		ret = can_nocow_extent(inode, start, &len, NULL, false, true);
		if (ret < 0) {
			goto out;
		} else if (ret) {
			ret = 0;
		} else {
			btrfs_warn(fs_info,
				   "swapfile must not be copy-on-write");
			ret = -EINVAL;
			goto out;
		}

		map = btrfs_get_chunk_map(fs_info, logical_block_start, len);
		if (IS_ERR(map)) {
			ret = PTR_ERR(map);
			goto out;
		}

		if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
			btrfs_warn(fs_info,
				   "swapfile must have single data profile");
			ret = -EINVAL;
			goto out;
		}

		if (device == NULL) {
			device = map->stripes[0].dev;
			ret = btrfs_add_swapfile_pin(inode, device, false);
			if (ret == 1)
				ret = 0;
			else if (ret)
				goto out;
		} else if (device != map->stripes[0].dev) {
			btrfs_warn(fs_info, "swapfile must be on one device");
			ret = -EINVAL;
			goto out;
		}

		physical_block_start = (map->stripes[0].physical +
					(logical_block_start - map->start));
		len = min(len, map->chunk_len - (logical_block_start - map->start));
		btrfs_free_chunk_map(map);
		map = NULL;

		bg = btrfs_lookup_block_group(fs_info, logical_block_start);
		if (!bg) {
			btrfs_warn(fs_info,
			   "could not find block group containing swapfile");
			ret = -EINVAL;
			goto out;
		}

		if (!btrfs_inc_block_group_swap_extents(bg)) {
			btrfs_warn(fs_info,
			   "block group for swapfile at %llu is read-only%s",
				   bg->start,
				   atomic_read(&fs_info->scrubs_running) ?
					       " (scrub running)" : "");
			btrfs_put_block_group(bg);
			ret = -EINVAL;
			goto out;
		}

		ret = btrfs_add_swapfile_pin(inode, bg, true);
		if (ret) {
			btrfs_put_block_group(bg);
			if (ret == 1)
				ret = 0;
			else
				goto out;
		}

		if (bsi.block_len &&
		    bsi.block_start + bsi.block_len == physical_block_start) {
			bsi.block_len += len;
		} else {
			if (bsi.block_len) {
				ret = btrfs_add_swap_extent(sis, &bsi);
				if (ret)
					goto out;
			}
			bsi.start = start;
			bsi.block_start = physical_block_start;
			bsi.block_len = len;
		}

		start += len;
	}

	if (bsi.block_len)
		ret = btrfs_add_swap_extent(sis, &bsi);

out:
	if (!IS_ERR_OR_NULL(em))
		free_extent_map(em);
	if (!IS_ERR_OR_NULL(map))
		btrfs_free_chunk_map(map);

	unlock_extent(io_tree, 0, isize - 1, &cached_state);

	if (ret)
		btrfs_swap_deactivate(file);

	btrfs_drew_write_unlock(&root->snapshot_lock);

	btrfs_exclop_finish(fs_info);

	if (ret)
		return ret;

	sis->bdev = device->bdev;
	*span = bsi.highest_ppage - bsi.lowest_ppage + 1;
	sis->max = bsi.nr_pages;
	sis->pages = bsi.nr_pages - 1;
	sis->highest_bit = bsi.nr_pages - 1;
	return bsi.nr_extents;
}
#else

static void btrfs_swap_deactivate(struct file *file)
{
}

static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
			       sector_t *span)
{
	return -EOPNOTSUPP;
}

#endif
/*
 * Update the number of bytes used in the VFS' inode. When we replace extents in
 * a range (clone, dedupe, fallocate's zero range), we must update the number of
 * bytes used by the inode in an atomic manner, so that concurrent stat(2) calls
 * always get a correct value.
 */
void btrfs_update_inode_bytes(struct btrfs_inode *inode,
			      const u64 add_bytes,
			      const u64 del_bytes)
{
	if (add_bytes == del_bytes)
		return;

	spin_lock(&inode->lock);
	if (del_bytes > 0)
		inode_sub_bytes(&inode->vfs_inode, del_bytes);
	if (add_bytes > 0)
		inode_add_bytes(&inode->vfs_inode, add_bytes);
	spin_unlock(&inode->lock);
}
/*
 * Verify that there are no ordered extents for a given file range.
 *
 * @inode:   The target inode.
 * @start:   Start offset of the file range, should be sector size aligned.
 * @end:     End offset (inclusive) of the file range, its value +1 should be
 *           sector size aligned.
 *
 * This should typically be used for cases where we locked an inode's VFS lock in
 * exclusive mode, we have also locked the inode's i_mmap_lock in exclusive mode,
 * we have flushed all delalloc in the range, we have waited for all ordered
 * extents in the range to complete and finally we have locked the file range in
 * the inode's io_tree.
 */
void btrfs_assert_inode_range_clean(struct btrfs_inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_ordered_extent *ordered;

	if (!IS_ENABLED(CONFIG_BTRFS_ASSERT))
		return;

	ordered = btrfs_lookup_first_ordered_range(inode, start, end + 1 - start);
	if (ordered) {
		btrfs_err(root->fs_info,
"found unexpected ordered extent in file range [%llu, %llu] for inode %llu root %llu (ordered range [%llu, %llu])",
			  start, end, btrfs_ino(inode), btrfs_root_id(root),
			  ordered->file_offset,
			  ordered->file_offset + ordered->num_bytes - 1);
		btrfs_put_ordered_extent(ordered);
	}

	ASSERT(ordered == NULL);
}
/*
 * Find the first inode with a minimum number.
 *
 * @root:    The root to search for.
 * @min_ino: The minimum inode number.
 *
 * Find the first inode in the @root with a number >= @min_ino and return it.
 * Returns NULL if no such inode found.
 */
struct btrfs_inode *btrfs_find_first_inode(struct btrfs_root *root, u64 min_ino)
{
	struct btrfs_inode *inode;
	unsigned long from = min_ino;

	xa_lock(&root->inodes);
	while (true) {
		inode = xa_find(&root->inodes, &from, ULONG_MAX, XA_PRESENT);
		if (!inode)
			break;
		if (igrab(&inode->vfs_inode))
			break;

		from = btrfs_ino(inode) + 1;
		cond_resched_lock(&root->inodes.xa_lock);
	}
	xa_unlock(&root->inodes);

	return inode;
}
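/*
 * The igrab() check in the loop above skips inodes that are on their way out:
 * xa_find() can return an inode that is already being evicted, in which case
 * igrab() returns NULL and the search moves on to the next inode number.
 */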
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct file_operations btrfs_dir_file_operations = {
	.llseek		= btrfs_dir_llseek,
	.read		= generic_read_dir,
	.iterate_shared	= btrfs_real_readdir,
	.open		= btrfs_opendir,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_compat_ioctl,
#endif
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
};
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.read_folio	= btrfs_read_folio,
	.writepages	= btrfs_writepages,
	.readahead	= btrfs_readahead,
	.invalidate_folio = btrfs_invalidate_folio,
	.launder_folio	= btrfs_launder_folio,
	.release_folio	= btrfs_release_folio,
	.migrate_folio	= btrfs_migrate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.error_remove_folio = generic_error_remove_folio,
	.swap_activate	= btrfs_swap_activate,
	.swap_deactivate = btrfs_swap_deactivate,
};
static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.fileattr_get	= btrfs_fileattr_get,
	.fileattr_set	= btrfs_fileattr_set,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_inode_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};
const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
};