/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/compat.h>
#include <linux/bit_spinlock.h>
#include <linux/xattr.h>
#include <linux/posix_acl.h>
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include <linux/btrfs.h>
#include <linux/blkdev.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uio.h>
#include "transaction.h"
#include "btrfs_inode.h"
#include "print-tree.h"
#include "ordered-data.h"
#include "compression.h"
#include "free-space-cache.h"
#include "inode-map.h"
struct btrfs_iget_args {
	struct btrfs_key *location;
	struct btrfs_root *root;
};
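/*
 * Bookkeeping carried through a direct IO write: the number of
 * outstanding extents reserved for the DIO, and the portion of the
 * range for which ordered extents have not yet been submitted.
 */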
struct btrfs_dio_data {
	u64 outstanding_extents;
	u64 unsubmitted_oe_range_start;
	u64 unsubmitted_oe_range_end;
};
static const struct inode_operations btrfs_dir_inode_operations;
static const struct inode_operations btrfs_symlink_inode_operations;
static const struct inode_operations btrfs_dir_ro_inode_operations;
static const struct inode_operations btrfs_special_inode_operations;
static const struct inode_operations btrfs_file_inode_operations;
static const struct address_space_operations btrfs_aops;
static const struct address_space_operations btrfs_symlink_aops;
static const struct file_operations btrfs_dir_file_operations;
static const struct extent_io_ops btrfs_extent_io_ops;
static struct kmem_cache *btrfs_inode_cachep;
struct kmem_cache *btrfs_trans_handle_cachep;
struct kmem_cache *btrfs_transaction_cachep;
struct kmem_cache *btrfs_path_cachep;
struct kmem_cache *btrfs_free_space_cachep;
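/*
 * Map S_IFMT mode bits to the btrfs directory-entry type stored on disk.
 */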
static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
	[S_IFREG >> S_SHIFT]	= BTRFS_FT_REG_FILE,
	[S_IFDIR >> S_SHIFT]	= BTRFS_FT_DIR,
	[S_IFCHR >> S_SHIFT]	= BTRFS_FT_CHRDEV,
	[S_IFBLK >> S_SHIFT]	= BTRFS_FT_BLKDEV,
	[S_IFIFO >> S_SHIFT]	= BTRFS_FT_FIFO,
	[S_IFSOCK >> S_SHIFT]	= BTRFS_FT_SOCK,
	[S_IFLNK >> S_SHIFT]	= BTRFS_FT_SYMLINK,
};
static int btrfs_setsize(struct inode *inode, struct iattr *attr);
static int btrfs_truncate(struct inode *inode);
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash);
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
				       u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type);

static int btrfs_dirty_inode(struct inode *inode);
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_inode_set_ops(struct inode *inode)
{
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
#endif
static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
				     struct inode *inode, struct inode *dir,
				     const struct qstr *qstr)
{
	int err;

	err = btrfs_init_acl(trans, inode, dir);
	if (!err)
		err = btrfs_xattr_security_init(trans, inode, dir, qstr);
	return err;
}
/*
 * this does all the hard work for inserting an inline extent into
 * the btree.  The caller should have done a btrfs_drop_extents so that
 * no overlapping inline items exist in the btree
 */
static int insert_inline_extent(struct btrfs_trans_handle *trans,
				struct btrfs_path *path, int extent_inserted,
				struct btrfs_root *root, struct inode *inode,
				u64 start, size_t size, size_t compressed_size,
				int compress_type,
				struct page **compressed_pages)
{
	struct extent_buffer *leaf;
	struct page *page = NULL;
	char *kaddr;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	int ret;
	size_t cur_size = size;
	unsigned long offset;

	if (compressed_size && compressed_pages)
		cur_size = compressed_size;

	inode_add_bytes(inode, size);

	if (!extent_inserted) {
		struct btrfs_key key;
		size_t datasize;

		key.objectid = btrfs_ino(BTRFS_I(inode));
		key.offset = start;
		key.type = BTRFS_EXTENT_DATA_KEY;

		datasize = btrfs_file_extent_calc_inline_size(cur_size);
		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &key,
					      datasize);
		if (ret)
			goto fail;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei, BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, size);
	ptr = btrfs_file_extent_inline_start(ei);

	if (compress_type != BTRFS_COMPRESS_NONE) {
		struct page *cpage;
		int i = 0;

		while (compressed_size > 0) {
			cpage = compressed_pages[i];
			cur_size = min_t(unsigned long, compressed_size,
					 PAGE_SIZE);

			kaddr = kmap_atomic(cpage);
			write_extent_buffer(leaf, kaddr, ptr, cur_size);
			kunmap_atomic(kaddr);

			i++;
			ptr += cur_size;
			compressed_size -= cur_size;
		}
		btrfs_set_file_extent_compression(leaf, ei,
						  compress_type);
	} else {
		page = find_get_page(inode->i_mapping,
				     start >> PAGE_SHIFT);
		btrfs_set_file_extent_compression(leaf, ei, 0);
		kaddr = kmap_atomic(page);
		offset = start & (PAGE_SIZE - 1);
		write_extent_buffer(leaf, kaddr + offset, ptr, size);
		kunmap_atomic(kaddr);
		put_page(page);
	}
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	/*
	 * we're an inline extent, so nobody can
	 * extend the file past i_size without locking
	 * a page we already have locked.
	 *
	 * We must do any isize and inode updates
	 * before we unlock the pages.  Otherwise we
	 * could end up racing with unlink.
	 */
	BTRFS_I(inode)->disk_i_size = inode->i_size;
	ret = btrfs_update_inode(trans, root, inode);

fail:
	return ret;
}
/*
 * conditionally insert an inline extent into the file.  This
 * does the checks required to make sure the data is small enough
 * to fit as an inline extent.
 */
static noinline int cow_file_range_inline(struct btrfs_root *root,
					  struct inode *inode, u64 start,
					  u64 end, size_t compressed_size,
					  int compress_type,
					  struct page **compressed_pages)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_trans_handle *trans;
	u64 isize = i_size_read(inode);
	u64 actual_end = min(end + 1, isize);
	u64 inline_len = actual_end - start;
	u64 aligned_end = ALIGN(end, fs_info->sectorsize);
	u64 data_len = inline_len;
	int ret;
	struct btrfs_path *path;
	int extent_inserted = 0;
	u32 extent_item_size;

	if (compressed_size)
		data_len = compressed_size;

	if (start > 0 ||
	    actual_end > fs_info->sectorsize ||
	    data_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info) ||
	    (!compressed_size &&
	    (actual_end & (fs_info->sectorsize - 1)) == 0) ||
	    end + 1 < isize ||
	    data_len > fs_info->max_inline) {
		return 1;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		btrfs_free_path(path);
		return PTR_ERR(trans);
	}
	trans->block_rsv = &fs_info->delalloc_block_rsv;

	if (compressed_size && compressed_pages)
		extent_item_size = btrfs_file_extent_calc_inline_size(
		   compressed_size);
	else
		extent_item_size = btrfs_file_extent_calc_inline_size(
		    inline_len);

	ret = __btrfs_drop_extents(trans, root, inode, path,
				   start, aligned_end, NULL,
				   1, 1, extent_item_size, &extent_inserted);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	if (isize > actual_end)
		inline_len = min_t(u64, isize, actual_end);
	ret = insert_inline_extent(trans, path, extent_inserted,
				   root, inode, start,
				   inline_len, compressed_size,
				   compress_type, compressed_pages);
	if (ret && ret != -ENOSPC) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	} else if (ret == -ENOSPC) {
		ret = 1;
		goto out;
	}

	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	btrfs_delalloc_release_metadata(BTRFS_I(inode), end + 1 - start);
	btrfs_drop_extent_cache(BTRFS_I(inode), start, aligned_end - 1, 0);
out:
	/*
	 * Don't forget to free the reserved space, as for inlined extent
	 * it won't count as data extent, free them directly here.
	 * And at reserve time, it's always aligned to page size, so
	 * just free one page here.
	 */
	btrfs_qgroup_free_data(inode, 0, PAGE_SIZE);
	btrfs_free_path(path);
	btrfs_end_transaction(trans);
	return ret;
}
struct async_extent {
	u64 start;
	u64 ram_size;
	u64 compressed_size;
	struct page **pages;
	unsigned long nr_pages;
	int compress_type;
	struct list_head list;
};

struct async_cow {
	struct inode *inode;
	struct btrfs_root *root;
	struct page *locked_page;
	u64 start;
	u64 end;
	struct list_head extents;
	struct btrfs_work work;
};
static noinline int add_async_extent(struct async_cow *cow,
				     u64 start, u64 ram_size,
				     u64 compressed_size,
				     struct page **pages,
				     unsigned long nr_pages,
				     int compress_type)
{
	struct async_extent *async_extent;

	async_extent = kmalloc(sizeof(*async_extent), GFP_NOFS);
	BUG_ON(!async_extent); /* -ENOMEM */
	async_extent->start = start;
	async_extent->ram_size = ram_size;
	async_extent->compressed_size = compressed_size;
	async_extent->pages = pages;
	async_extent->nr_pages = nr_pages;
	async_extent->compress_type = compress_type;
	list_add_tail(&async_extent->list, &cow->extents);
	return 0;
}
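/*
 * Decide whether a delalloc range should go through the compression
 * path, based on the mount options and the per-inode compression flags.
 */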
static inline int inode_need_compress(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	/* force compress */
	if (btrfs_test_opt(fs_info, FORCE_COMPRESS))
		return 1;
	/* bad compression ratios */
	if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS)
		return 0;
	if (btrfs_test_opt(fs_info, COMPRESS) ||
	    BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS ||
	    BTRFS_I(inode)->force_compress)
		return 1;
	return 0;
}
static inline void inode_should_defrag(struct btrfs_inode *inode,
		u64 start, u64 end, u64 num_bytes, u64 small_write)
{
	/* If this is a small write inside eof, kick off a defrag */
	if (num_bytes < small_write &&
	    (start > 0 || end + 1 < inode->disk_i_size))
		btrfs_add_inode_defrag(NULL, inode);
}
/*
 * we create compressed extents in two phases.  The first
 * phase compresses a range of pages that have already been
 * locked (both pages and state bits are locked).
 *
 * This is done inside an ordered work queue, and the compression
 * is spread across many cpus.  The actual IO submission is step
 * two, and the ordered work queue takes care of making sure that
 * happens in the same order things were put onto the queue by
 * writepages and friends.
 *
 * If this code finds it can't get good compression, it puts an
 * entry onto the work queue to write the uncompressed bytes.  This
 * makes sure that both compressed inodes and uncompressed inodes
 * are written in the same order that the flusher thread sent them
 * down.
 */
static noinline void compress_file_range(struct inode *inode,
					struct page *locked_page,
					u64 start, u64 end,
					struct async_cow *async_cow,
					int *num_added)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 num_bytes;
	u64 blocksize = fs_info->sectorsize;
	u64 actual_end;
	u64 isize = i_size_read(inode);
	int ret = 0;
	struct page **pages = NULL;
	unsigned long nr_pages;
	unsigned long total_compressed = 0;
	unsigned long total_in = 0;
	int i;
	int will_compress;
	int compress_type = fs_info->compress_type;
	int redirty = 0;

	inode_should_defrag(BTRFS_I(inode), start, end, end - start + 1,
			SZ_16K);

	actual_end = min_t(u64, isize, end + 1);
again:
	will_compress = 0;
	nr_pages = (end >> PAGE_SHIFT) - (start >> PAGE_SHIFT) + 1;
	BUILD_BUG_ON((BTRFS_MAX_COMPRESSED % PAGE_SIZE) != 0);
	nr_pages = min_t(unsigned long, nr_pages,
			BTRFS_MAX_COMPRESSED / PAGE_SIZE);

	/*
	 * we don't want to send crud past the end of i_size through
	 * compression, that's just a waste of CPU time.  So, if the
	 * end of the file is before the start of our current
	 * requested range of bytes, we bail out to the uncompressed
	 * cleanup code that can deal with all of this.
	 *
	 * It isn't really the fastest way to fix things, but this is a
	 * very uncommon corner.
	 */
	if (actual_end <= start)
		goto cleanup_and_bail_uncompressed;

	total_compressed = actual_end - start;

	/*
	 * skip compression for a small file range (<= blocksize) that
	 * isn't an inline extent, since it doesn't save disk space at all.
	 */
	if (total_compressed <= blocksize &&
	   (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
		goto cleanup_and_bail_uncompressed;

	total_compressed = min_t(unsigned long, total_compressed,
			BTRFS_MAX_UNCOMPRESSED);
	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);

	/*
	 * we do compression for mount -o compress and when the
	 * inode has not been flagged as nocompress.  This flag can
	 * change at any time if we discover bad compression ratios.
	 */
	if (inode_need_compress(inode)) {
		WARN_ON(pages);
		pages = kcalloc(nr_pages, sizeof(struct page *), GFP_NOFS);
		if (!pages) {
			/* just bail out to the uncompressed code */
			goto cont;
		}

		if (BTRFS_I(inode)->force_compress)
			compress_type = BTRFS_I(inode)->force_compress;

		/*
		 * we need to call clear_page_dirty_for_io on each
		 * page in the range.  Otherwise applications with the file
		 * mmap'd can wander in and change the page contents while
		 * we are compressing them.
		 *
		 * If the compression fails for any reason, we set the pages
		 * dirty again later on.
		 */
		extent_range_clear_dirty_for_io(inode, start, end);
		redirty = 1;
		ret = btrfs_compress_pages(compress_type,
					   inode->i_mapping, start,
					   pages,
					   &nr_pages,
					   &total_in,
					   &total_compressed);

		if (!ret) {
			unsigned long offset = total_compressed &
				(PAGE_SIZE - 1);
			struct page *page = pages[nr_pages - 1];
			char *kaddr;

			/* zero the tail end of the last page, we might be
			 * sending it down to disk
			 */
			if (offset) {
				kaddr = kmap_atomic(page);
				memset(kaddr + offset, 0,
				       PAGE_SIZE - offset);
				kunmap_atomic(kaddr);
			}
			will_compress = 1;
		}
	}
cont:
	if (start == 0) {
		/* lets try to make an inline extent */
		if (ret || total_in < (actual_end - start)) {
			/* we didn't compress the entire range, try
			 * to make an uncompressed inline extent.
			 */
			ret = cow_file_range_inline(root, inode, start, end,
					    0, BTRFS_COMPRESS_NONE, NULL);
		} else {
			/* try making a compressed inline extent */
			ret = cow_file_range_inline(root, inode, start, end,
						    total_compressed,
						    compress_type, pages);
		}
		if (ret <= 0) {
			unsigned long clear_flags = EXTENT_DELALLOC |
				EXTENT_DEFRAG;
			unsigned long page_error_op;

			clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
			page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;

			/*
			 * inline extent creation worked or returned error,
			 * we don't need to create any more async work items.
			 * Unlock and free up our temp pages.
			 */
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, clear_flags,
						     PAGE_UNLOCK |
						     PAGE_CLEAR_DIRTY |
						     PAGE_SET_WRITEBACK |
						     page_error_op |
						     PAGE_END_WRITEBACK);
			if (ret == 0)
				btrfs_free_reserved_data_space_noquota(inode,
							start,
							end - start + 1);
			goto free_pages_out;
		}
	}

	if (will_compress) {
		/*
		 * we aren't doing an inline extent, round the compressed size
		 * up to a block size boundary so the allocator does sane
		 * things
		 */
		total_compressed = ALIGN(total_compressed, blocksize);

		/*
		 * one last check to make sure the compression is really a
		 * win, compare the page count read with the blocks on disk
		 */
		total_in = ALIGN(total_in, PAGE_SIZE);
		if (total_compressed >= total_in) {
			will_compress = 0;
		} else {
			num_bytes = total_in;
			*num_added += 1;

			/*
			 * The async work queues will take care of doing actual
			 * allocation on disk for these compressed pages, and
			 * will submit them to the elevator.
			 */
			add_async_extent(async_cow, start, num_bytes,
					total_compressed, pages, nr_pages,
					compress_type);

			if (start + num_bytes < end) {
				start += num_bytes;
				pages = NULL;
				cond_resched();
				goto again;
			}
			return;
		}
	}
	if (!will_compress && pages) {
		/*
		 * the compression code ran but failed to make things smaller,
		 * free any pages it allocated and our page pointer array
		 */
		for (i = 0; i < nr_pages; i++) {
			WARN_ON(pages[i]->mapping);
			put_page(pages[i]);
		}
		kfree(pages);
		pages = NULL;
		total_compressed = 0;
		nr_pages = 0;

		/* flag the file so we don't compress in the future */
		if (!btrfs_test_opt(fs_info, FORCE_COMPRESS) &&
		    !(BTRFS_I(inode)->force_compress)) {
			BTRFS_I(inode)->flags |= BTRFS_INODE_NOCOMPRESS;
		}
	}
cleanup_and_bail_uncompressed:
	/*
	 * No compression, but we still need to write the pages in the file
	 * we've been given so far.  redirty the locked page if it corresponds
	 * to our extent and set things up for the async work queue to run
	 * cow_file_range to do the normal delalloc dance.
	 */
	if (page_offset(locked_page) >= start &&
	    page_offset(locked_page) <= end)
		__set_page_dirty_nobuffers(locked_page);
	/* unlocked later on in the async handlers */

	if (redirty)
		extent_range_redirty_for_io(inode, start, end);
	add_async_extent(async_cow, start, end - start + 1, 0, NULL, 0,
			 BTRFS_COMPRESS_NONE);
	*num_added += 1;

	return;

free_pages_out:
	for (i = 0; i < nr_pages; i++) {
		WARN_ON(pages[i]->mapping);
		put_page(pages[i]);
	}
	kfree(pages);
}
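/*
 * Drop the page references held by an async_extent and reset its
 * page array.
 */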
static void free_async_extent_pages(struct async_extent *async_extent)
{
	int i;

	if (!async_extent->pages)
		return;

	for (i = 0; i < async_extent->nr_pages; i++) {
		WARN_ON(async_extent->pages[i]->mapping);
		put_page(async_extent->pages[i]);
	}
	kfree(async_extent->pages);
	async_extent->nr_pages = 0;
	async_extent->pages = NULL;
}
/*
 * phase two of compressed writeback.  This is the ordered portion
 * of the code, which only gets called in the order the work was
 * queued.  We walk all the async extents created by compress_file_range
 * and send them down to the disk.
 */
static noinline void submit_compressed_extents(struct inode *inode,
					      struct async_cow *async_cow)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_extent *async_extent;
	u64 alloc_hint = 0;
	struct btrfs_key ins;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree;
	int ret = 0;

again:
	while (!list_empty(&async_cow->extents)) {
		async_extent = list_entry(async_cow->extents.next,
					  struct async_extent, list);
		list_del(&async_extent->list);

		io_tree = &BTRFS_I(inode)->io_tree;

retry:
		/* did the compression code fall back to uncompressed IO? */
		if (!async_extent->pages) {
			int page_started = 0;
			unsigned long nr_written = 0;

			lock_extent(io_tree, async_extent->start,
				    async_extent->start +
				    async_extent->ram_size - 1);

			/* allocate blocks */
			ret = cow_file_range(inode, async_cow->locked_page,
					     async_extent->start,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     async_extent->start +
					     async_extent->ram_size - 1,
					     &page_started, &nr_written, 0,
					     NULL);

			/*
			 * if page_started, cow_file_range inserted an
			 * inline extent and took care of all the unlocking
			 * and IO for us.  Otherwise, we need to submit
			 * all those pages down to the drive.
			 */
			if (!page_started && !ret)
				extent_write_locked_range(io_tree,
						  inode, async_extent->start,
						  async_extent->start +
						  async_extent->ram_size - 1,
						  btrfs_get_extent,
						  WB_SYNC_ALL);
			else if (ret)
				unlock_page(async_cow->locked_page);
			kfree(async_extent);
			cond_resched();
			continue;
		}

		lock_extent(io_tree, async_extent->start,
			    async_extent->start + async_extent->ram_size - 1);

		ret = btrfs_reserve_extent(root, async_extent->ram_size,
					   async_extent->compressed_size,
					   async_extent->compressed_size,
					   0, alloc_hint, &ins, 1, 1);
		if (ret) {
			free_async_extent_pages(async_extent);

			if (ret == -ENOSPC) {
				unlock_extent(io_tree, async_extent->start,
					      async_extent->start +
					      async_extent->ram_size - 1);

				/*
				 * we need to redirty the pages if we decide to
				 * fallback to uncompressed IO, otherwise we
				 * will not submit these pages down to lower
				 * layers.
				 */
				extent_range_redirty_for_io(inode,
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1);

				goto retry;
			}
			goto out_free;
		}
		/*
		 * here we're doing allocation and writeback of the
		 * compressed pages
		 */
		em = create_io_em(inode, async_extent->start,
				  async_extent->ram_size, /* len */
				  async_extent->start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  async_extent->ram_size, /* ram_bytes */
				  async_extent->compress_type,
				  BTRFS_ORDERED_COMPRESSED);
		if (IS_ERR(em))
			/* ret value is not necessary due to void function */
			goto out_free_reserve;
		free_extent_map(em);

		ret = btrfs_add_ordered_extent_compress(inode,
						async_extent->start,
						ins.objectid,
						async_extent->ram_size,
						ins.offset,
						BTRFS_ORDERED_COMPRESSED,
						async_extent->compress_type);
		if (ret) {
			btrfs_drop_extent_cache(BTRFS_I(inode),
						async_extent->start,
						async_extent->start +
						async_extent->ram_size - 1, 0);
			goto out_free_reserve;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		/*
		 * clear dirty, set writeback and unlock the pages.
		 */
		extent_clear_unlock_delalloc(inode, async_extent->start,
				async_extent->start +
				async_extent->ram_size - 1,
				async_extent->start +
				async_extent->ram_size - 1,
				NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
				PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				PAGE_SET_WRITEBACK);
		ret = btrfs_submit_compressed_write(inode,
				    async_extent->start,
				    async_extent->ram_size,
				    ins.objectid,
				    ins.offset, async_extent->pages,
				    async_extent->nr_pages);
		if (ret) {
			struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
			struct page *p = async_extent->pages[0];
			const u64 start = async_extent->start;
			const u64 end = start + async_extent->ram_size - 1;

			p->mapping = inode->i_mapping;
			tree->ops->writepage_end_io_hook(p, start, end,
							 NULL, 0);
			p->mapping = NULL;
			extent_clear_unlock_delalloc(inode, start, end, end,
						     NULL, 0,
						     PAGE_END_WRITEBACK |
						     PAGE_SET_ERROR);
			free_async_extent_pages(async_extent);
		}
		alloc_hint = ins.objectid + ins.offset;
		kfree(async_extent);
		cond_resched();
	}
	return;
out_free_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_free:
	extent_clear_unlock_delalloc(inode, async_extent->start,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     async_extent->start +
				     async_extent->ram_size - 1,
				     NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
				     PAGE_SET_ERROR);
	free_async_extent_pages(async_extent);
	kfree(async_extent);
	goto again;
}
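/*
 * Pick an allocation hint for a new extent by looking at where
 * neighboring extents of this inode already live on disk.
 */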
static u64 get_extent_allocation_hint(struct inode *inode, u64 start,
				      u64 num_bytes)
{
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	u64 alloc_hint = 0;

	read_lock(&em_tree->lock);
	em = search_extent_mapping(em_tree, start, num_bytes);
	if (em) {
		/*
		 * if block start isn't an actual block number then find the
		 * first block in this inode and use that as a hint.  If that
		 * block is also bogus then just don't worry about it.
		 */
		if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
			free_extent_map(em);
			em = search_extent_mapping(em_tree, 0, 0);
			if (em && em->block_start < EXTENT_MAP_LAST_BYTE)
				alloc_hint = em->block_start;
			if (em)
				free_extent_map(em);
		} else {
			alloc_hint = em->block_start;
			free_extent_map(em);
		}
	}
	read_unlock(&em_tree->lock);

	return alloc_hint;
}
/*
 * when extent_io.c finds a delayed allocation range in the file,
 * the call backs end up in this code.  The basic idea is to
 * allocate extents on disk for the range, and create ordered data structs
 * in ram to track those extents.
 *
 * locked_page is the page that writepage had locked already.  We use
 * it to make sure we don't do extra locks or unlocks.
 *
 * *page_started is set to one if we unlock locked_page and do everything
 * required to start IO on it.  It may be clean and already done with
 * IO when we return.
 */
static noinline int cow_file_range(struct inode *inode,
				   struct page *locked_page,
				   u64 start, u64 end, u64 delalloc_end,
				   int *page_started, unsigned long *nr_written,
				   int unlock, struct btrfs_dedupe_hash *hash)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	u64 alloc_hint = 0;
	u64 num_bytes;
	unsigned long ram_size;
	u64 disk_num_bytes;
	u64 cur_alloc_size;
	u64 blocksize = fs_info->sectorsize;
	struct btrfs_key ins;
	struct extent_map *em;
	unsigned long op;
	int ret = 0;

	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		goto out_unlock;
	}

	num_bytes = ALIGN(end - start + 1, blocksize);
	num_bytes = max(blocksize, num_bytes);
	disk_num_bytes = num_bytes;

	inode_should_defrag(BTRFS_I(inode), start, end, num_bytes, SZ_64K);

	if (start == 0) {
		/* lets try to make an inline extent */
		ret = cow_file_range_inline(root, inode, start, end, 0,
					BTRFS_COMPRESS_NONE, NULL);
		if (ret == 0) {
			extent_clear_unlock_delalloc(inode, start, end,
				     delalloc_end, NULL,
				     EXTENT_LOCKED | EXTENT_DELALLOC |
				     EXTENT_DEFRAG, PAGE_UNLOCK |
				     PAGE_CLEAR_DIRTY | PAGE_SET_WRITEBACK |
				     PAGE_END_WRITEBACK);
			btrfs_free_reserved_data_space_noquota(inode, start,
						end - start + 1);
			*nr_written = *nr_written +
			     (end - start + PAGE_SIZE) / PAGE_SIZE;
			*page_started = 1;
			goto out;
		} else if (ret < 0) {
			goto out_unlock;
		}
	}

	BUG_ON(disk_num_bytes >
	       btrfs_super_total_bytes(fs_info->super_copy));

	alloc_hint = get_extent_allocation_hint(inode, start, num_bytes);
	btrfs_drop_extent_cache(BTRFS_I(inode), start,
			start + num_bytes - 1, 0);

	while (disk_num_bytes > 0) {
		cur_alloc_size = disk_num_bytes;
		ret = btrfs_reserve_extent(root, cur_alloc_size, cur_alloc_size,
					   fs_info->sectorsize, 0, alloc_hint,
					   &ins, 1, 1);
		if (ret < 0)
			goto out_unlock;

		ram_size = ins.offset;
		em = create_io_em(inode, start, ins.offset, /* len */
				  start, /* orig_start */
				  ins.objectid, /* block_start */
				  ins.offset, /* block_len */
				  ins.offset, /* orig_block_len */
				  ram_size, /* ram_bytes */
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  BTRFS_ORDERED_REGULAR /* type */);
		if (IS_ERR(em))
			goto out_reserve;

		cur_alloc_size = ins.offset;
		ret = btrfs_add_ordered_extent(inode, start, ins.objectid,
					       ram_size, cur_alloc_size, 0);
		if (ret) {
			free_extent_map(em);
			goto out_drop_extent_cache;
		}

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, start,
						      cur_alloc_size);
			if (ret)
				goto out_drop_extent_cache;
		}

		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		if (disk_num_bytes < cur_alloc_size)
			break;

		/* we're not doing compressed IO, don't unlock the first
		 * page (which the caller expects to stay locked), don't
		 * clear any dirty bits and don't set any writeback bits
		 *
		 * Do set the Private2 bit so we know this page was properly
		 * setup for writepage
		 */
		op = unlock ? PAGE_UNLOCK : 0;
		op |= PAGE_SET_PRIVATE2;

		extent_clear_unlock_delalloc(inode, start,
					     start + ram_size - 1,
					     delalloc_end, locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC,
					     op);
		disk_num_bytes -= cur_alloc_size;
		num_bytes -= cur_alloc_size;
		alloc_hint = ins.objectid + ins.offset;
		start += cur_alloc_size;
	}
out:
	return ret;

out_drop_extent_cache:
	btrfs_drop_extent_cache(BTRFS_I(inode), start, start + ram_size - 1, 0);
out_reserve:
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
out_unlock:
	extent_clear_unlock_delalloc(inode, start, end, delalloc_end,
				     locked_page,
				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
				     EXTENT_DELALLOC | EXTENT_DEFRAG,
				     PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
				     PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK);
	goto out;
}
/*
 * work queue call back to start compression on a file and pages
 */
static noinline void async_cow_start(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	int num_added = 0;
	async_cow = container_of(work, struct async_cow, work);

	compress_file_range(async_cow->inode, async_cow->locked_page,
			    async_cow->start, async_cow->end, async_cow,
			    &num_added);
	if (num_added == 0) {
		btrfs_add_delayed_iput(async_cow->inode);
		async_cow->inode = NULL;
	}
}
/*
 * work queue call back to submit previously compressed pages
 */
static noinline void async_cow_submit(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info;
	struct async_cow *async_cow;
	struct btrfs_root *root;
	unsigned long nr_pages;

	async_cow = container_of(work, struct async_cow, work);

	root = async_cow->root;
	fs_info = root->fs_info;
	nr_pages = (async_cow->end - async_cow->start + PAGE_SIZE) >>
		PAGE_SHIFT;

	/*
	 * atomic_sub_return implies a barrier for waitqueue_active
	 */
	if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
	    5 * SZ_1M &&
	    waitqueue_active(&fs_info->async_submit_wait))
		wake_up(&fs_info->async_submit_wait);

	if (async_cow->inode)
		submit_compressed_extents(async_cow->inode, async_cow);
}
static noinline void async_cow_free(struct btrfs_work *work)
{
	struct async_cow *async_cow;
	async_cow = container_of(work, struct async_cow, work);
	if (async_cow->inode)
		btrfs_add_delayed_iput(async_cow->inode);
	kfree(async_cow);
}
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
				u64 start, u64 end, int *page_started,
				unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct async_cow *async_cow;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long nr_pages;
	u64 cur_end;

	clear_extent_bit(&BTRFS_I(inode)->io_tree, start, end, EXTENT_LOCKED,
			 1, 0, NULL, GFP_NOFS);
	while (start < end) {
		async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
		BUG_ON(!async_cow); /* -ENOMEM */
		async_cow->inode = igrab(inode);
		async_cow->root = root;
		async_cow->locked_page = locked_page;
		async_cow->start = start;

		if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
		    !btrfs_test_opt(fs_info, FORCE_COMPRESS))
			cur_end = end;
		else
			cur_end = min(end, start + SZ_512K - 1);

		async_cow->end = cur_end;
		INIT_LIST_HEAD(&async_cow->extents);

		btrfs_init_work(&async_cow->work,
				btrfs_delalloc_helper,
				async_cow_start, async_cow_submit,
				async_cow_free);

		nr_pages = (cur_end - start + PAGE_SIZE) >>
			PAGE_SHIFT;
		atomic_add(nr_pages, &fs_info->async_delalloc_pages);

		btrfs_queue_work(fs_info->delalloc_workers, &async_cow->work);

		while (atomic_read(&fs_info->async_submit_draining) &&
		       atomic_read(&fs_info->async_delalloc_pages)) {
			wait_event(fs_info->async_submit_wait,
				   (atomic_read(&fs_info->async_delalloc_pages) ==
				    0));
		}

		*nr_written += nr_pages;
		start = cur_end + 1;
	}
	*page_started = 1;
	return 0;
}
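/*
 * Check whether any checksum items exist for the given disk byte
 * range, freeing whatever sums the lookup handed back.
 */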
static noinline int csum_exist_in_range(struct btrfs_fs_info *fs_info,
					u64 bytenr, u64 num_bytes)
{
	int ret;
	struct btrfs_ordered_sum *sums;
	LIST_HEAD(list);

	ret = btrfs_lookup_csums_range(fs_info->csum_root, bytenr,
				       bytenr + num_bytes - 1, &list, 0);
	if (ret == 0 && list_empty(&list))
		return 0;

	while (!list_empty(&list)) {
		sums = list_entry(list.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	return 1;
}
/*
 * called for the nocow writeback path.  This checks for snapshots or COW
 * copies of the extents that exist in the file, and COWs the file as
 * required.
 *
 * If no cow copies or snapshots exist, we write directly to the existing
 * blocks on disk
 */
static noinline int run_delalloc_nocow(struct inode *inode,
				       struct page *locked_page,
			      u64 start, u64 end, int *page_started, int force,
			      unsigned long *nr_written)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key found_key;
	struct extent_map *em;
	u64 cow_start;
	u64 cur_offset;
	u64 extent_end;
	u64 extent_offset;
	u64 disk_bytenr;
	u64 num_bytes;
	u64 disk_num_bytes;
	u64 ram_bytes;
	int extent_type;
	int ret, err;
	int type;
	int nocow;
	int check_prev = 1;
	bool nolock;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	path = btrfs_alloc_path();
	if (!path) {
		extent_clear_unlock_delalloc(inode, start, end, end,
					     locked_page,
					     EXTENT_LOCKED | EXTENT_DELALLOC |
					     EXTENT_DO_ACCOUNTING |
					     EXTENT_DEFRAG, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
		return -ENOMEM;
	}

	nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

	cow_start = (u64)-1;
	cur_offset = start;
	while (1) {
		ret = btrfs_lookup_file_extent(NULL, root, path, ino,
					       cur_offset, 0);
		if (ret < 0)
			goto error;
		if (ret > 0 && path->slots[0] > 0 && check_prev) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0] - 1);
			if (found_key.objectid == ino &&
			    found_key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		check_prev = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto error;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		nocow = 0;
		disk_bytenr = 0;
		num_bytes = 0;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		if (found_key.objectid > ino)
			break;
		if (WARN_ON_ONCE(found_key.objectid < ino) ||
		    found_key.type < BTRFS_EXTENT_DATA_KEY) {
			path->slots[0]++;
			goto next_slot;
		}
		if (found_key.type > BTRFS_EXTENT_DATA_KEY ||
		    found_key.offset > end)
			break;

		if (found_key.offset > cur_offset) {
			extent_end = found_key.offset;
			extent_type = 0;
			goto out_check;
		}

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = found_key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
			disk_num_bytes =
				btrfs_file_extent_disk_num_bytes(leaf, fi);
			if (extent_end <= start) {
				path->slots[0]++;
				goto next_slot;
			}
			if (disk_bytenr == 0)
				goto out_check;
			if (btrfs_file_extent_compression(leaf, fi) ||
			    btrfs_file_extent_encryption(leaf, fi) ||
			    btrfs_file_extent_other_encoding(leaf, fi))
				goto out_check;
			if (extent_type == BTRFS_FILE_EXTENT_REG && !force)
				goto out_check;
			if (btrfs_extent_readonly(fs_info, disk_bytenr))
				goto out_check;
			if (btrfs_cross_ref_exist(root, ino,
						  found_key.offset -
						  extent_offset, disk_bytenr))
				goto out_check;
			disk_bytenr += extent_offset;
			disk_bytenr += cur_offset - found_key.offset;
			num_bytes = min(end + 1, extent_end) - cur_offset;
			/*
			 * if there are pending snapshots for this root,
			 * we fall into common COW way.
			 */
			if (!nolock) {
				err = btrfs_start_write_no_snapshoting(root);
				if (!err)
					goto out_check;
			}
			/*
			 * force cow if csum exists in the range.
			 * this ensures that csums for a given extent are
			 * either valid or do not exist.
			 */
			if (csum_exist_in_range(fs_info, disk_bytenr,
						num_bytes)) {
				if (!nolock)
					btrfs_end_write_no_snapshoting(root);
				goto out_check;
			}
			if (!btrfs_inc_nocow_writers(fs_info, disk_bytenr)) {
				if (!nolock)
					btrfs_end_write_no_snapshoting(root);
				goto out_check;
			}
			nocow = 1;
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = found_key.offset +
				btrfs_file_extent_inline_len(leaf,
						     path->slots[0], fi);
			extent_end = ALIGN(extent_end,
					   fs_info->sectorsize);
		} else {
			BUG_ON(1);
		}
out_check:
		if (extent_end <= start) {
			path->slots[0]++;
			if (!nolock && nocow)
				btrfs_end_write_no_snapshoting(root);
			if (nocow)
				btrfs_dec_nocow_writers(fs_info, disk_bytenr);
			goto next_slot;
		}
		if (!nocow) {
			if (cow_start == (u64)-1)
				cow_start = cur_offset;
			cur_offset = extent_end;
			if (cur_offset > end)
				break;
			path->slots[0]++;
			goto next_slot;
		}

		btrfs_release_path(path);
		if (cow_start != (u64)-1) {
			ret = cow_file_range(inode, locked_page,
					     cow_start, found_key.offset - 1,
					     end, page_started, nr_written, 1,
					     NULL);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
								disk_bytenr);
				goto error;
			}
			cow_start = (u64)-1;
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			u64 orig_start = found_key.offset - extent_offset;

			em = create_io_em(inode, cur_offset, num_bytes,
					  orig_start,
					  disk_bytenr, /* block_start */
					  num_bytes, /* block_len */
					  disk_num_bytes, /* orig_block_len */
					  ram_bytes, BTRFS_COMPRESS_NONE,
					  BTRFS_ORDERED_PREALLOC);
			if (IS_ERR(em)) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				if (nocow)
					btrfs_dec_nocow_writers(fs_info,
							disk_bytenr);
				ret = PTR_ERR(em);
				goto error;
			}
			free_extent_map(em);
		}

		if (extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			type = BTRFS_ORDERED_PREALLOC;
		} else {
			type = BTRFS_ORDERED_NOCOW;
		}

		ret = btrfs_add_ordered_extent(inode, cur_offset, disk_bytenr,
					       num_bytes, num_bytes, type);
		if (nocow)
			btrfs_dec_nocow_writers(fs_info, disk_bytenr);
		BUG_ON(ret); /* -ENOMEM */

		if (root->root_key.objectid ==
		    BTRFS_DATA_RELOC_TREE_OBJECTID) {
			ret = btrfs_reloc_clone_csums(inode, cur_offset,
						      num_bytes);
			if (ret) {
				if (!nolock && nocow)
					btrfs_end_write_no_snapshoting(root);
				goto error;
			}
		}

		extent_clear_unlock_delalloc(inode, cur_offset,
					     cur_offset + num_bytes - 1, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC |
					     EXTENT_CLEAR_DATA_RESV,
					     PAGE_UNLOCK | PAGE_SET_PRIVATE2);

		if (!nolock && nocow)
			btrfs_end_write_no_snapshoting(root);
		cur_offset = extent_end;
		if (cur_offset > end)
			break;
	}
	btrfs_release_path(path);

	if (cur_offset <= end && cow_start == (u64)-1) {
		cow_start = cur_offset;
		cur_offset = end;
	}

	if (cow_start != (u64)-1) {
		ret = cow_file_range(inode, locked_page, cow_start, end, end,
				     page_started, nr_written, 1, NULL);
		if (ret)
			goto error;
	}

error:
	if (ret && cur_offset < end)
		extent_clear_unlock_delalloc(inode, cur_offset, end, end,
					     locked_page, EXTENT_LOCKED |
					     EXTENT_DELALLOC | EXTENT_DEFRAG |
					     EXTENT_DO_ACCOUNTING, PAGE_UNLOCK |
					     PAGE_CLEAR_DIRTY |
					     PAGE_SET_WRITEBACK |
					     PAGE_END_WRITEBACK);
	btrfs_free_path(path);
	return ret;
}
static inline int need_force_cow(struct inode *inode, u64 start, u64 end)
{
	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	    !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC))
		return 1;

	/*
	 * @defrag_bytes is a hint value, no spinlock held here;
	 * if it is not zero, it means the file is defragging.
	 * Force cow if the given extent needs to be defragged.
	 */
	if (BTRFS_I(inode)->defrag_bytes &&
	    test_range_bit(&BTRFS_I(inode)->io_tree, start, end,
			   EXTENT_DEFRAG, 0, NULL))
		return 1;

	return 0;
}
/*
 * extent_io.c call back to do delayed allocation processing
 */
static int run_delalloc_range(struct inode *inode, struct page *locked_page,
			      u64 start, u64 end, int *page_started,
			      unsigned long *nr_written)
{
	int ret;
	int force_cow = need_force_cow(inode, start, end);

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 1, nr_written);
	} else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC && !force_cow) {
		ret = run_delalloc_nocow(inode, locked_page, start, end,
					 page_started, 0, nr_written);
	} else if (!inode_need_compress(inode)) {
		ret = cow_file_range(inode, locked_page, start, end, end,
				      page_started, nr_written, 1, NULL);
	} else {
		set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
			&BTRFS_I(inode)->runtime_flags);
		ret = cow_file_range_async(inode, locked_page, start, end,
					   page_started, nr_written);
	}
	return ret;
}
static void btrfs_split_extent_hook(struct inode *inode,
				    struct extent_state *orig, u64 split)
{
	u64 size;

	/* not delalloc, ignore it */
	if (!(orig->state & EXTENT_DELALLOC))
		return;

	size = orig->end - orig->start + 1;
	if (size > BTRFS_MAX_EXTENT_SIZE) {
		u32 num_extents;
		u64 new_size;

		/*
		 * See the explanation in btrfs_merge_extent_hook, the same
		 * applies here, just in reverse.
		 */
		new_size = orig->end - split + 1;
		num_extents = count_max_extents(new_size);
		new_size = split - orig->start;
		num_extents += count_max_extents(new_size);
		if (count_max_extents(size) >= num_extents)
			return;
	}

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents++;
	spin_unlock(&BTRFS_I(inode)->lock);
}
/*
 * extent_io.c merge_extent_hook, used to track merged delayed allocation
 * extents so we can keep track of new extents that are just merged onto old
 * extents, such as when we are doing sequential writes, so we can properly
 * account for the metadata space we'll need.
 */
static void btrfs_merge_extent_hook(struct inode *inode,
				    struct extent_state *new,
				    struct extent_state *other)
{
	u64 new_size, old_size;
	u32 num_extents;

	/* not delalloc, ignore it */
	if (!(other->state & EXTENT_DELALLOC))
		return;

	if (new->start > other->start)
		new_size = new->end - other->start + 1;
	else
		new_size = other->end - new->start + 1;

	/* we're not bigger than the max, unreserve the space and go */
	if (new_size <= BTRFS_MAX_EXTENT_SIZE) {
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents--;
		spin_unlock(&BTRFS_I(inode)->lock);
		return;
	}

	/*
	 * We have to add up either side to figure out how many extents were
	 * accounted for before we merged into one big extent.  If the number of
	 * extents we accounted for is <= the amount we need for the new range
	 * then we can return, otherwise drop.  Think of it like this
	 *
	 * [ 4k][MAX_SIZE]
	 *
	 * So we've grown the extent by a MAX_SIZE extent, this would mean we
	 * need 2 outstanding extents, on one side we have 1 and the other side
	 * we have 1 so they are == and we can return.  But in this case
	 *
	 * [MAX_SIZE+4k][MAX_SIZE+4k]
	 *
	 * Each range on their own accounts for 2 extents, but merged together
	 * they are only 3 extents worth of accounting, so we need to drop in
	 * this case.
	 */
	old_size = other->end - other->start + 1;
	num_extents = count_max_extents(old_size);
	old_size = new->end - new->start + 1;
	num_extents += count_max_extents(old_size);
	if (count_max_extents(new_size) >= num_extents)
		return;

	spin_lock(&BTRFS_I(inode)->lock);
	BTRFS_I(inode)->outstanding_extents--;
	spin_unlock(&BTRFS_I(inode)->lock);
}
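/*
 * Put this inode on its root's delalloc list (and the root on the
 * fs-wide delalloc list if the root's list was empty) under the
 * delalloc lock.
 */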
static void btrfs_add_delalloc_inodes(struct btrfs_root *root,
				      struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	spin_lock(&root->delalloc_lock);
	if (list_empty(&BTRFS_I(inode)->delalloc_inodes)) {
		list_add_tail(&BTRFS_I(inode)->delalloc_inodes,
			      &root->delalloc_inodes);
		set_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			&BTRFS_I(inode)->runtime_flags);
		root->nr_delalloc_inodes++;
		if (root->nr_delalloc_inodes == 1) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(!list_empty(&root->delalloc_root));
			list_add_tail(&root->delalloc_root,
				      &fs_info->delalloc_roots);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
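/*
 * Mirror of btrfs_add_delalloc_inodes: drop the inode from the per-root
 * delalloc list and, if the root's list goes empty, drop the root from
 * the fs-wide list.
 */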
static void btrfs_del_delalloc_inode(struct btrfs_root *root,
				     struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);

	spin_lock(&root->delalloc_lock);
	if (!list_empty(&inode->delalloc_inodes)) {
		list_del_init(&inode->delalloc_inodes);
		clear_bit(BTRFS_INODE_IN_DELALLOC_LIST,
			  &inode->runtime_flags);
		root->nr_delalloc_inodes--;
		if (!root->nr_delalloc_inodes) {
			spin_lock(&fs_info->delalloc_root_lock);
			BUG_ON(list_empty(&root->delalloc_root));
			list_del_init(&root->delalloc_root);
			spin_unlock(&fs_info->delalloc_root_lock);
		}
	}
	spin_unlock(&root->delalloc_lock);
}
/*
 * extent_io.c set_bit_hook, used to track delayed allocation
 * bytes in this file, and to maintain the list of inodes that
 * have pending delalloc work to be done.
 */
static void btrfs_set_bit_hook(struct inode *inode,
			       struct extent_state *state, unsigned *bits)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);

	if ((*bits & EXTENT_DEFRAG) && !(*bits & EXTENT_DELALLOC))
		WARN_ON(1);
	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = BTRFS_I(inode)->root;
		u64 len = state->end + 1 - state->start;
		bool do_list = !btrfs_is_free_space_inode(BTRFS_I(inode));

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else {
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
		}

		/* For sanity tests */
		if (btrfs_is_testing(fs_info))
			return;

		__percpu_counter_add(&fs_info->delalloc_bytes, len,
				     fs_info->delalloc_batch);
		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->delalloc_bytes += len;
		if (*bits & EXTENT_DEFRAG)
			BTRFS_I(inode)->defrag_bytes += len;
		if (do_list && !test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					 &BTRFS_I(inode)->runtime_flags))
			btrfs_add_delalloc_inodes(root, inode);
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
/*
 * extent_io.c clear_bit_hook, see set_bit_hook for why
 */
static void btrfs_clear_bit_hook(struct btrfs_inode *inode,
				 struct extent_state *state,
				 unsigned *bits)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	u64 len = state->end + 1 - state->start;
	u32 num_extents = count_max_extents(len);

	spin_lock(&inode->lock);
	if ((state->state & EXTENT_DEFRAG) && (*bits & EXTENT_DEFRAG))
		inode->defrag_bytes -= len;
	spin_unlock(&inode->lock);

	/*
	 * set_bit and clear bit hooks normally require _irqsave/restore
	 * but in this case, we are only testing for the DELALLOC
	 * bit, which is only set or cleared with irqs on
	 */
	if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) {
		struct btrfs_root *root = inode->root;
		bool do_list = !btrfs_is_free_space_inode(inode);

		if (*bits & EXTENT_FIRST_DELALLOC) {
			*bits &= ~EXTENT_FIRST_DELALLOC;
		} else if (!(*bits & EXTENT_DO_ACCOUNTING)) {
			spin_lock(&inode->lock);
			inode->outstanding_extents -= num_extents;
			spin_unlock(&inode->lock);
		}

		/*
		 * We don't reserve metadata space for space cache inodes so we
		 * don't need to call delalloc_release_metadata if there is an
		 * error.
		 */
		if (*bits & EXTENT_DO_ACCOUNTING &&
		    root != fs_info->tree_root)
			btrfs_delalloc_release_metadata(inode, len);

		/* For sanity tests. */
		if (btrfs_is_testing(fs_info))
			return;

		if (root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
		    && do_list && !(state->state & EXTENT_NORESERVE)
		    && (*bits & (EXTENT_DO_ACCOUNTING |
		    EXTENT_CLEAR_DATA_RESV)))
			btrfs_free_reserved_data_space_noquota(
					&inode->vfs_inode,
					state->start, len);

		__percpu_counter_add(&fs_info->delalloc_bytes, -len,
				     fs_info->delalloc_batch);
		spin_lock(&inode->lock);
		inode->delalloc_bytes -= len;
		if (do_list && inode->delalloc_bytes == 0 &&
		    test_bit(BTRFS_INODE_IN_DELALLOC_LIST,
					&inode->runtime_flags))
			btrfs_del_delalloc_inode(root, inode);
		spin_unlock(&inode->lock);
	}
}
/*
 * extent_io.c merge_bio_hook, this must check the chunk tree to make sure
 * we don't create bios that span stripes or chunks
 *
 * return 1 if page cannot be merged to bio
 * return 0 if page can be merged to bio
 * return error otherwise
 */
int btrfs_merge_bio_hook(struct page *page, unsigned long offset,
			 size_t size, struct bio *bio,
			 unsigned long bio_flags)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
	u64 length = 0;
	u64 map_length;
	int ret;

	if (bio_flags & EXTENT_BIO_COMPRESSED)
		return 0;

	length = bio->bi_iter.bi_size;
	map_length = length;
	ret = btrfs_map_block(fs_info, btrfs_op(bio), logical, &map_length,
			      NULL, 0);
	if (ret < 0)
		return ret;
	if (map_length < length + size)
		return 1;
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_start(struct inode *inode, struct bio *bio,
				    int mirror_num, unsigned long bio_flags,
				    u64 bio_offset)
{
	int ret = 0;

	ret = btrfs_csum_one_bio(inode, bio, 0, 0);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}
/*
 * in order to insert checksums into the metadata in large chunks,
 * we wait until bio submission time.   All the pages in the bio are
 * checksummed and sums are attached onto the ordered extent record.
 *
 * At IO completion time the csums attached on the ordered extent record
 * are inserted into the btree
 */
static int __btrfs_submit_bio_done(struct inode *inode, struct bio *bio,
				   int mirror_num, unsigned long bio_flags,
				   u64 bio_offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;

	ret = btrfs_map_bio(fs_info, bio, mirror_num, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}
/*
 * extent_io.c submission hook. This does the right thing for csum calculation
 * on write, or reading the csums from the tree before a read
 */
static int btrfs_submit_bio_hook(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags,
				 u64 bio_offset)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	enum btrfs_wq_endio_type metadata = BTRFS_WQ_ENDIO_DATA;
	int ret = 0;
	int skip_sum;
	int async = !atomic_read(&BTRFS_I(inode)->sync_writers);

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
		metadata = BTRFS_WQ_ENDIO_FREE_SPACE;

	if (bio_op(bio) != REQ_OP_WRITE) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, metadata);
		if (ret)
			goto out;

		if (bio_flags & EXTENT_BIO_COMPRESSED) {
			ret = btrfs_submit_compressed_read(inode, bio,
							   mirror_num,
							   bio_flags);
			goto out;
		} else if (!skip_sum) {
			ret = btrfs_lookup_bio_sums(inode, bio, NULL);
			if (ret)
				goto out;
		}
		goto mapit;
	} else if (async && !skip_sum) {
		/* csum items have already been cloned */
		if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
			goto mapit;
		/* we're doing a write, do the async checksumming */
		ret = btrfs_wq_submit_bio(fs_info, inode, bio, mirror_num,
					  bio_flags, bio_offset,
					  __btrfs_submit_bio_start,
					  __btrfs_submit_bio_done);
		goto out;
	} else if (!skip_sum) {
		ret = btrfs_csum_one_bio(inode, bio, 0, 0);
		if (ret)
			goto out;
	}

mapit:
	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);

out:
	if (ret < 0) {
		bio->bi_error = ret;
		bio_endio(bio);
	}
	return ret;
}
/*
 * given a list of ordered sums record them in the inode.  This happens
 * at IO completion time based on sums calculated at bio submission time.
 */
static noinline int add_pending_csums(struct btrfs_trans_handle *trans,
			     struct inode *inode, struct list_head *list)
{
	struct btrfs_ordered_sum *sum;

	list_for_each_entry(sum, list, list) {
		trans->adding_csums = 1;
		btrfs_csum_file_blocks(trans,
		       BTRFS_I(inode)->root->fs_info->csum_root, sum);
		trans->adding_csums = 0;
	}
	return 0;
}
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
			      struct extent_state **cached_state, int dedupe)
{
	WARN_ON((end & (PAGE_SIZE - 1)) == 0);
	return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
				   cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
struct btrfs_writepage_fixup {
	struct page *page;
	struct btrfs_work work;
};

static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
{
	struct btrfs_writepage_fixup *fixup;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	struct page *page;
	struct inode *inode;
	u64 page_start;
	u64 page_end;
	int ret;

	fixup = container_of(work, struct btrfs_writepage_fixup, work);
	page = fixup->page;
again:
	lock_page(page);
	if (!page->mapping || !PageDirty(page) || !PageChecked(page)) {
		ClearPageChecked(page);
		goto out_page;
	}

	inode = page->mapping->host;
	page_start = page_offset(page);
	page_end = page_offset(page) + PAGE_SIZE - 1;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, page_start, page_end,
			 &cached_state);

	/* already ordered? We're done */
	if (PagePrivate2(page))
		goto out;

	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
					PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start,
				     page_end, &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	ret = btrfs_delalloc_reserve_space(inode, page_start,
					   PAGE_SIZE);
	if (ret) {
		mapping_set_error(page->mapping, ret);
		end_extent_writepage(page, ret, page_start, page_end);
		ClearPageChecked(page);
		goto out;
	}

	btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
				  0);
	ClearPageChecked(page);
	set_page_dirty(page);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, page_start, page_end,
			     &cached_state, GFP_NOFS);
out_page:
	unlock_page(page);
	put_page(page);
	kfree(fixup);
}
/*
 * There are a few paths in the higher layers of the kernel that directly
 * set the page dirty bit without asking the filesystem if it is a
 * good idea.  This causes problems because we want to make sure COW
 * properly happens and the data=ordered rules are followed.
 *
 * In our case any range that doesn't have the ORDERED bit set
 * hasn't been properly setup for IO.  We kick off an async process
 * to fix it up.  The async helper will wait for ordered extents, set
 * the delalloc bit and make it safe to write the page.
 */
static int btrfs_writepage_start_hook(struct page *page, u64 start, u64 end)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_writepage_fixup *fixup;

	/* this page is properly in the ordered list */
	if (TestClearPagePrivate2(page))
		return 0;

	if (PageChecked(page))
		return -EAGAIN;

	fixup = kzalloc(sizeof(*fixup), GFP_NOFS);
	if (!fixup)
		return -EAGAIN;

	SetPageChecked(page);
	get_page(page);
	btrfs_init_work(&fixup->work, btrfs_fixup_helper,
			btrfs_writepage_fixup_worker, NULL, NULL);
	fixup->page = page;
	btrfs_queue_work(fs_info->fixup_workers, &fixup->work);
	return -EBUSY;
}
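/*
 * Insert the file extent item for a finished ordered extent into the
 * fs tree and add the corresponding extent backref, dropping any
 * extents it replaces.
 */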
static int insert_reserved_file_extent(struct btrfs_trans_handle *trans,
				       struct inode *inode, u64 file_pos,
				       u64 disk_bytenr, u64 disk_num_bytes,
				       u64 num_bytes, u64 ram_bytes,
				       u8 compression, u8 encryption,
				       u16 other_encoding, int extent_type)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_file_extent_item *fi;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key ins;
	int extent_inserted = 0;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/*
	 * we may be replacing one extent in the tree with another.
	 * The new extent is pinned in the extent map, and we don't want
	 * to drop it from the cache until it is completely in the btree.
	 *
	 * So, tell btrfs_drop_extents to leave this extent in the cache.
	 * the caller is expected to unpin it and allow it to be merged
	 * with the others.
	 */
	ret = __btrfs_drop_extents(trans, root, inode, path, file_pos,
				   file_pos + num_bytes, NULL, 0,
				   1, sizeof(*fi), &extent_inserted);
	if (ret)
		goto out;

	if (!extent_inserted) {
		ins.objectid = btrfs_ino(BTRFS_I(inode));
		ins.offset = file_pos;
		ins.type = BTRFS_EXTENT_DATA_KEY;

		path->leave_spinning = 1;
		ret = btrfs_insert_empty_item(trans, root, path, &ins,
					      sizeof(*fi));
		if (ret)
			goto out;
	}
	leaf = path->nodes[0];
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, fi, trans->transid);
	btrfs_set_file_extent_type(leaf, fi, extent_type);
	btrfs_set_file_extent_disk_bytenr(leaf, fi, disk_bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, fi, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, fi, 0);
	btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, fi, ram_bytes);
	btrfs_set_file_extent_compression(leaf, fi, compression);
	btrfs_set_file_extent_encryption(leaf, fi, encryption);
	btrfs_set_file_extent_other_encoding(leaf, fi, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	inode_add_bytes(inode, num_bytes);

	ins.objectid = disk_bytenr;
	ins.offset = disk_num_bytes;
	ins.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_alloc_reserved_file_extent(trans, root->root_key.objectid,
			btrfs_ino(BTRFS_I(inode)), file_pos, ram_bytes, &ins);
	/*
	 * Release the reserved range from inode dirty range map, as it is
	 * already moved into delayed_ref_head
	 */
	btrfs_qgroup_release_data(inode, file_pos, ram_bytes);
out:
	btrfs_free_path(path);

	return ret;
}
/* snapshot-aware defrag */
struct sa_defrag_extent_backref {
	struct rb_node node;
	struct old_sa_defrag_extent *old;
	u64 root_id;
	u64 inum;
	u64 file_pos;
	u64 extent_offset;
	u64 num_bytes;
	u64 generation;
};

struct old_sa_defrag_extent {
	struct list_head list;
	struct new_sa_defrag_extent *new;

	u64 extent_offset;
	u64 bytenr;
	u64 offset;
	u64 len;
	int count;
};

struct new_sa_defrag_extent {
	struct rb_root root;
	struct list_head head;
	struct btrfs_path *path;
	struct inode *inode;
};
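/*
 * Total order on backrefs: compare by root id, then inode number, then
 * file position; used to keep the per-extent backref rbtree sorted.
 */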
static int backref_comp(struct sa_defrag_extent_backref *b1,
			struct sa_defrag_extent_backref *b2)
{
	if (b1->root_id < b2->root_id)
		return -1;
	else if (b1->root_id > b2->root_id)
		return 1;

	if (b1->inum < b2->inum)
		return -1;
	else if (b1->inum > b2->inum)
		return 1;

	if (b1->file_pos < b2->file_pos)
		return -1;
	else if (b1->file_pos > b2->file_pos)
		return 1;

	return 0;
}
2169 * [------------------------------] ===> (a range of space)
2170 * |<--->| |<---->| =============> (fs/file tree A)
2171 * |<---------------------------->| ===> (fs/file tree B)
2173 * A range of space can refer to two file extents in one tree while
2174 * refer to only one file extent in another tree.
2176 * So we may process a disk offset more than one time(two extents in A)
2177 * and locate at the same extent(one extent in B), then insert two same
2178 * backrefs(both refer to the extent in B).
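
/*
 * Insert a backref into the rbtree, ordered by backref_comp(); as the
 * comment above notes, equal keys are possible and simply land to the
 * right of the existing node.
 */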
static void backref_insert(struct rb_root *root,
			   struct sa_defrag_extent_backref *backref)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct sa_defrag_extent_backref *entry;
	int ret;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct sa_defrag_extent_backref, node);

		ret = backref_comp(backref, entry);
		if (ret < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&backref->node, parent, p);
	rb_insert_color(&backref->node, root);
}
/*
 * Note the backref might have changed; in that case we just return 0.
 */
static noinline int record_one_backref(u64 inum, u64 offset, u64 root_id,
				       void *ctx)
{
	struct btrfs_file_extent_item *extent;
	struct old_sa_defrag_extent *old = ctx;
	struct new_sa_defrag_extent *new = old->new;
	struct btrfs_path *path = new->path;
	struct btrfs_key key;
	struct btrfs_root *root;
	struct sa_defrag_extent_backref *backref;
	struct extent_buffer *leaf;
	struct inode *inode = new->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;
	int slot;
	u64 extent_offset;
	u64 num_bytes;

	if (BTRFS_I(inode)->root->root_key.objectid == root_id &&
	    inum == btrfs_ino(BTRFS_I(inode)))
		return 0;

	key.objectid = root_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		if (PTR_ERR(root) == -ENOENT)
			return 0;
		WARN_ON(1);
		btrfs_debug(fs_info, "inum=%llu, offset=%llu, root_id=%llu",
			    inum, offset, root_id);
		return PTR_ERR(root);
	}

	key.objectid = inum;
	key.type = BTRFS_EXTENT_DATA_KEY;
	if (offset > (u64)-1 << 32)
		key.offset = 0;
	else
		key.offset = offset;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (WARN_ON(ret < 0))
		return ret;
	ret = 0;

	while (1) {
		cond_resched();

		leaf = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				ret = 0;
				goto out;
			}
			continue;
		}

		path->slots[0]++;

		btrfs_item_key_to_cpu(leaf, &key, slot);

		if (key.objectid > inum)
			goto out;

		if (key.objectid < inum || key.type != BTRFS_EXTENT_DATA_KEY)
			continue;

		extent = btrfs_item_ptr(leaf, slot,
					struct btrfs_file_extent_item);

		if (btrfs_file_extent_disk_bytenr(leaf, extent) != old->bytenr)
			continue;

		/*
		 * 'offset' refers to the exact key.offset,
		 * NOT the 'offset' field in btrfs_extent_data_ref, ie.
		 * (key.offset - extent_offset).
		 */
		if (key.offset != offset)
			continue;

		extent_offset = btrfs_file_extent_offset(leaf, extent);
		num_bytes = btrfs_file_extent_num_bytes(leaf, extent);

		if (extent_offset >= old->extent_offset + old->offset +
		    old->len || extent_offset + num_bytes <=
		    old->extent_offset + old->offset)
			continue;
		break;
	}

	backref = kmalloc(sizeof(*backref), GFP_NOFS);
	if (!backref) {
		ret = -ENOMEM;
		goto out;
	}

	backref->root_id = root_id;
	backref->inum = inum;
	backref->file_pos = offset;
	backref->num_bytes = num_bytes;
	backref->extent_offset = extent_offset;
	backref->generation = btrfs_file_extent_generation(leaf, extent);
	backref->old = old;
	backref_insert(&new->root, backref);
	old->count++;
out:
	btrfs_release_path(path);
	WARN_ON(ret);
	return ret;
}
static noinline bool record_extent_backrefs(struct btrfs_path *path,
					    struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct old_sa_defrag_extent *old, *tmp;
	int ret;

	new->path = path;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		ret = iterate_inodes_from_logical(old->bytenr +
						  old->extent_offset, fs_info,
						  path, record_one_backref,
						  old);
		if (ret < 0 && ret != -ENOENT)
			return false;

		/* no backref to be processed for this extent */
		if (!old->count) {
			list_del(&old->list);
			kfree(old);
		}
	}

	if (list_empty(&new->head))
		return false;

	return true;
}
static int relink_is_mergable(struct extent_buffer *leaf,
			      struct btrfs_file_extent_item *fi,
			      struct new_sa_defrag_extent *new)
{
	if (btrfs_file_extent_disk_bytenr(leaf, fi) != new->bytenr)
		return 0;

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_compression(leaf, fi) != new->compress_type)
		return 0;

	if (btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	return 1;
}
/*
 * Note the backref might have changed; in that case we just return 0.
 */
static noinline int relink_extent_backref(struct btrfs_path *path,
				 struct sa_defrag_extent_backref *prev,
				 struct sa_defrag_extent_backref *backref)
{
	struct btrfs_file_extent_item *extent;
	struct btrfs_file_extent_item *item;
	struct btrfs_ordered_extent *ordered;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root;
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct old_sa_defrag_extent *old = backref->old;
	struct new_sa_defrag_extent *new = old->new;
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct inode *inode;
	struct extent_state *cached = NULL;
	int ret = 0;
	u64 start;
	u64 len;
	u64 lock_start;
	u64 lock_end;
	bool merge = false;
	int index;

	if (prev && prev->root_id == backref->root_id &&
	    prev->inum == backref->inum &&
	    prev->file_pos + prev->num_bytes == backref->file_pos)
		merge = true;

	/* step 1: get root */
	key.objectid = backref->root_id;
	key.type = BTRFS_ROOT_ITEM_KEY;
	key.offset = (u64)-1;

	index = srcu_read_lock(&fs_info->subvol_srcu);

	root = btrfs_read_fs_root_no_name(fs_info, &key);
	if (IS_ERR(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		if (PTR_ERR(root) == -ENOENT)
			return 0;
		return PTR_ERR(root);
	}

	if (btrfs_root_readonly(root)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		return 0;
	}

	/* step 2: get inode */
	key.objectid = backref->inum;
	key.type = BTRFS_INODE_ITEM_KEY;
	key.offset = 0;

	inode = btrfs_iget(fs_info->sb, &key, root, NULL);
	if (IS_ERR(inode)) {
		srcu_read_unlock(&fs_info->subvol_srcu, index);
		return 0;
	}

	srcu_read_unlock(&fs_info->subvol_srcu, index);

	/* step 3: relink backref */
	lock_start = backref->file_pos;
	lock_end = backref->file_pos + backref->num_bytes - 1;
	lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			 &cached);

	ordered = btrfs_lookup_first_ordered_extent(inode, lock_end);
	if (ordered) {
		btrfs_put_ordered_extent(ordered);
		goto out_unlock;
	}

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_unlock;
	}

	key.objectid = backref->inum;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = backref->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0) {
		goto out_free_path;
	} else if (ret > 0) {
		ret = 0;
		goto out_free_path;
	}

	extent = btrfs_item_ptr(path->nodes[0], path->slots[0],
				struct btrfs_file_extent_item);

	if (btrfs_file_extent_generation(path->nodes[0], extent) !=
	    backref->generation)
		goto out_free_path;

	btrfs_release_path(path);

	start = backref->file_pos;
	if (backref->extent_offset < old->extent_offset + old->offset)
		start += old->extent_offset + old->offset -
			 backref->extent_offset;

	len = min(backref->extent_offset + backref->num_bytes,
		  old->extent_offset + old->offset + old->len);
	len -= max(backref->extent_offset, old->extent_offset + old->offset);

	ret = btrfs_drop_extents(trans, root, inode, start,
				 start + len, 1);
	if (ret)
		goto out_free_path;
again:
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = start;

	path->leave_spinning = 1;
	if (merge) {
		struct btrfs_file_extent_item *fi;
		u64 extent_len;
		struct btrfs_key found_key;

		ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
		if (ret < 0)
			goto out_free_path;

		path->slots[0]--;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_len = btrfs_file_extent_num_bytes(leaf, fi);

		if (extent_len + found_key.offset == start &&
		    relink_is_mergable(leaf, fi, new)) {
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_len + len);
			btrfs_mark_buffer_dirty(leaf);
			inode_add_bytes(inode, len);

			ret = 1;
			goto out_free_path;
		} else {
			merge = false;
			btrfs_release_path(path);
			goto again;
		}
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(*extent));
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, new->bytenr);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, new->disk_len);
	btrfs_set_file_extent_offset(leaf, item, start - new->file_pos);
	btrfs_set_file_extent_num_bytes(leaf, item, len);
	btrfs_set_file_extent_ram_bytes(leaf, item, new->len);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, new->compress_type);
	btrfs_set_file_extent_encryption(leaf, item, 0);
	btrfs_set_file_extent_other_encoding(leaf, item, 0);

	btrfs_mark_buffer_dirty(leaf);
	inode_add_bytes(inode, len);
	btrfs_release_path(path);

	ret = btrfs_inc_extent_ref(trans, fs_info, new->bytenr,
				   new->disk_len, 0,
				   backref->root_id, backref->inum,
				   new->file_pos);	/* start - extent_offset */
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_free_path;
	}

	ret = 1;
out_free_path:
	btrfs_release_path(path);
	path->leave_spinning = 0;
	btrfs_end_transaction(trans);
out_unlock:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start, lock_end,
			     &cached, GFP_NOFS);
	iput(inode);
	return ret;
}
static void free_sa_defrag_extent(struct new_sa_defrag_extent *new)
{
	struct old_sa_defrag_extent *old, *tmp;

	if (!new)
		return;

	list_for_each_entry_safe(old, tmp, &new->head, list) {
		list_del(&old->list);
		kfree(old);
	}
	kfree(new);
}
static void relink_file_extents(struct new_sa_defrag_extent *new)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(new->inode->i_sb);
	struct btrfs_path *path;
	struct sa_defrag_extent_backref *backref;
	struct sa_defrag_extent_backref *prev = NULL;
	struct inode *inode;
	struct btrfs_root *root;
	struct rb_node *node;
	int ret;

	inode = new->inode;
	root = BTRFS_I(inode)->root;

	path = btrfs_alloc_path();
	if (!path)
		return;

	if (!record_extent_backrefs(path, new)) {
		btrfs_free_path(path);
		goto out;
	}
	btrfs_release_path(path);

	while (1) {
		node = rb_first(&new->root);
		if (!node)
			break;
		rb_erase(node, &new->root);

		backref = rb_entry(node, struct sa_defrag_extent_backref, node);

		ret = relink_extent_backref(path, prev, backref);
		WARN_ON(ret < 0);

		kfree(prev);

		if (ret == 1)
			prev = backref;
		else
			prev = NULL;
		cond_resched();
	}
	kfree(prev);

	btrfs_free_path(path);
out:
	free_sa_defrag_extent(new);

	atomic_dec(&fs_info->defrag_running);
	wake_up(&fs_info->transaction_wait);
}
static struct new_sa_defrag_extent *
record_old_file_extents(struct inode *inode,
			struct btrfs_ordered_extent *ordered)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct old_sa_defrag_extent *old;
	struct new_sa_defrag_extent *new;
	int ret;

	new = kmalloc(sizeof(*new), GFP_NOFS);
	if (!new)
		return NULL;

	new->inode = inode;
	new->file_pos = ordered->file_offset;
	new->len = ordered->len;
	new->bytenr = ordered->start;
	new->disk_len = ordered->disk_len;
	new->compress_type = ordered->compress_type;
	new->root = RB_ROOT;
	INIT_LIST_HEAD(&new->head);

	path = btrfs_alloc_path();
	if (!path)
		goto out_kfree;

	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = new->file_pos;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out_free_path;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	/* find out all the old extents for the file range */
	while (1) {
		struct btrfs_file_extent_item *extent;
		struct extent_buffer *l;
		int slot;
		u64 num_bytes;
		u64 offset;
		u64 end;
		u64 disk_bytenr;
		u64 extent_offset;

		l = path->nodes[0];
		slot = path->slots[0];

		if (slot >= btrfs_header_nritems(l)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out_free_path;
			else if (ret > 0)
				break;
			continue;
		}

		btrfs_item_key_to_cpu(l, &key, slot);

		if (key.objectid != btrfs_ino(BTRFS_I(inode)))
			break;
		if (key.type != BTRFS_EXTENT_DATA_KEY)
			break;
		if (key.offset >= new->file_pos + new->len)
			break;

		extent = btrfs_item_ptr(l, slot, struct btrfs_file_extent_item);

		num_bytes = btrfs_file_extent_num_bytes(l, extent);
		if (key.offset + num_bytes < new->file_pos)
			goto next;

		disk_bytenr = btrfs_file_extent_disk_bytenr(l, extent);
		if (!disk_bytenr)
			goto next;

		extent_offset = btrfs_file_extent_offset(l, extent);

		old = kmalloc(sizeof(*old), GFP_NOFS);
		if (!old)
			goto out_free_path;

		offset = max(new->file_pos, key.offset);
		end = min(new->file_pos + new->len, key.offset + num_bytes);

		old->bytenr = disk_bytenr;
		old->extent_offset = extent_offset;
		old->offset = offset - key.offset;
		old->len = end - offset;
		old->new = new;
		old->count = 0;
		list_add_tail(&old->list, &new->head);
next:
		path->slots[0]++;
		cond_resched();
	}

	btrfs_free_path(path);
	atomic_inc(&fs_info->defrag_running);

	return new;

out_free_path:
	btrfs_free_path(path);
out_kfree:
	free_sa_defrag_extent(new);
	return NULL;
}
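
/*
 * The range is no longer delalloc; drop it from the containing block
 * group's delalloc accounting.
 */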
static void btrfs_release_delalloc_bytes(struct btrfs_fs_info *fs_info,
					 u64 start, u64 len)
{
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_block_group(fs_info, start);
	ASSERT(cache);

	spin_lock(&cache->lock);
	cache->delalloc_bytes -= len;
	spin_unlock(&cache->lock);

	btrfs_put_block_group(cache);
}
/* as ordered data IO finishes, this gets called so we can finish
 * an ordered extent if the range of bytes in the file it covers are
 * fully written.
 */
static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
{
	struct inode *inode = ordered_extent->inode;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_state *cached_state = NULL;
	struct new_sa_defrag_extent *new = NULL;
	int compress_type = 0;
	int ret = 0;
	u64 logical_len = ordered_extent->len;
	bool nolock;
	bool truncated = false;

	nolock = btrfs_is_free_space_inode(BTRFS_I(inode));

	if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
		ret = -EIO;
		goto out;
	}

	btrfs_free_io_failure_record(BTRFS_I(inode),
			ordered_extent->file_offset,
			ordered_extent->file_offset +
			ordered_extent->len - 1);

	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
		truncated = true;
		logical_len = ordered_extent->truncated_len;
		/* Truncated the entire extent, don't bother adding */
		if (!logical_len)
			goto out;
	}

	if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
		BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */

		/*
		 * For mwrite(mmap + memset to write) case, we still reserve
		 * space for NOCOW range.
		 * As NOCOW won't cause a new delayed ref, just free the space
		 */
		btrfs_qgroup_free_data(inode, ordered_extent->file_offset,
				       ordered_extent->len);
		btrfs_ordered_update_i_size(inode, 0, ordered_extent);
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			goto out;
		}
		trans->block_rsv = &fs_info->delalloc_block_rsv;
		ret = btrfs_update_inode_fallback(trans, root, inode);
		if (ret) /* -ENOMEM or corruption */
			btrfs_abort_transaction(trans, ret);
		goto out;
	}

	lock_extent_bits(io_tree, ordered_extent->file_offset,
			 ordered_extent->file_offset + ordered_extent->len - 1,
			 &cached_state);

	ret = test_range_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 1, cached_state);
	if (ret) {
		u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
		if (0 && last_snapshot >= BTRFS_I(inode)->generation)
			/* the inode is shared */
			new = record_old_file_extents(inode, ordered_extent);

		clear_extent_bit(io_tree, ordered_extent->file_offset,
			ordered_extent->file_offset + ordered_extent->len - 1,
			EXTENT_DEFRAG, 0, 0, &cached_state, GFP_NOFS);
	}

	if (nolock)
		trans = btrfs_join_transaction_nolock(root);
	else
		trans = btrfs_join_transaction(root);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		trans = NULL;
		goto out_unlock;
	}

	trans->block_rsv = &fs_info->delalloc_block_rsv;

	if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered_extent->flags))
		compress_type = ordered_extent->compress_type;
	if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags)) {
		BUG_ON(compress_type);
		ret = btrfs_mark_extent_written(trans, BTRFS_I(inode),
						ordered_extent->file_offset,
						ordered_extent->file_offset +
						logical_len);
	} else {
		BUG_ON(root == fs_info->tree_root);
		ret = insert_reserved_file_extent(trans, inode,
						ordered_extent->file_offset,
						ordered_extent->start,
						ordered_extent->disk_len,
						logical_len, logical_len,
						compress_type, 0, 0,
						BTRFS_FILE_EXTENT_REG);
		if (!ret)
			btrfs_release_delalloc_bytes(fs_info,
						     ordered_extent->start,
						     ordered_extent->disk_len);
	}
	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
			   ordered_extent->file_offset, ordered_extent->len,
			   trans->transid);
	if (ret < 0) {
		btrfs_abort_transaction(trans, ret);
		goto out_unlock;
	}

	add_pending_csums(trans, inode, &ordered_extent->list);

	btrfs_ordered_update_i_size(inode, 0, ordered_extent);
	ret = btrfs_update_inode_fallback(trans, root, inode);
	if (ret) { /* -ENOMEM or corruption */
		btrfs_abort_transaction(trans, ret);
		goto out_unlock;
	}
	ret = 0;
out_unlock:
	unlock_extent_cached(io_tree, ordered_extent->file_offset,
			     ordered_extent->file_offset +
			     ordered_extent->len - 1, &cached_state, GFP_NOFS);
out:
	if (root != fs_info->tree_root)
		btrfs_delalloc_release_metadata(BTRFS_I(inode),
						ordered_extent->len);
	if (trans)
		btrfs_end_transaction(trans);

	if (ret || truncated) {
		u64 start, end;

		if (truncated)
			start = ordered_extent->file_offset + logical_len;
		else
			start = ordered_extent->file_offset;
		end = ordered_extent->file_offset + ordered_extent->len - 1;
		clear_extent_uptodate(io_tree, start, end, NULL, GFP_NOFS);

		/* Drop the cache for the part of the extent we didn't write. */
		btrfs_drop_extent_cache(BTRFS_I(inode), start, end, 0);

		/*
		 * If the ordered extent had an IOERR or something else went
		 * wrong we need to return the space for this ordered extent
		 * back to the allocator.  We only free the extent in the
		 * truncated case if we didn't write out the extent at all.
		 */
		if ((ret || !logical_len) &&
		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
			btrfs_free_reserved_extent(fs_info,
						   ordered_extent->start,
						   ordered_extent->disk_len, 1);
	}

	/*
	 * This needs to be done to make sure anybody waiting knows we are done
	 * updating everything for this ordered extent.
	 */
	btrfs_remove_ordered_extent(inode, ordered_extent);

	/* for snapshot-aware defrag */
	if (new) {
		if (ret) {
			free_sa_defrag_extent(new);
			atomic_dec(&fs_info->defrag_running);
		} else {
			relink_file_extents(new);
		}
	}

	/* once for us */
	btrfs_put_ordered_extent(ordered_extent);
	/* once for the tree */
	btrfs_put_ordered_extent(ordered_extent);

	return ret;
}
static void finish_ordered_fn(struct btrfs_work *work)
{
	struct btrfs_ordered_extent *ordered_extent;
	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
	btrfs_finish_ordered_io(ordered_extent);
}
static void btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
				struct extent_state *state, int uptodate)
{
	struct inode *inode = page->mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_extent *ordered_extent = NULL;
	struct btrfs_workqueue *wq;
	btrfs_work_func_t func;

	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);

	ClearPagePrivate2(page);
	if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
					    end - start + 1, uptodate))
		return;

	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		wq = fs_info->endio_freespace_worker;
		func = btrfs_freespace_write_helper;
	} else {
		wq = fs_info->endio_write_workers;
		func = btrfs_endio_write_helper;
	}

	btrfs_init_work(&ordered_extent->work, func, finish_ordered_fn, NULL,
			NULL);
	btrfs_queue_work(wq, &ordered_extent->work);
}
static int __readpage_endio_check(struct inode *inode,
				  struct btrfs_io_bio *io_bio,
				  int icsum, struct page *page,
				  int pgoff, u64 start, size_t len)
{
	char *kaddr;
	u32 csum_expected;
	u32 csum = ~(u32)0;

	csum_expected = *(((u32 *)io_bio->csum) + icsum);

	kaddr = kmap_atomic(page);
	csum = btrfs_csum_data(kaddr + pgoff, csum, len);
	btrfs_csum_final(csum, (u8 *)&csum);
	if (csum != csum_expected)
		goto zeroit;

	kunmap_atomic(kaddr);
	return 0;
zeroit:
	btrfs_print_data_csum_error(BTRFS_I(inode), start, csum, csum_expected,
				    io_bio->mirror_num);
	memset(kaddr + pgoff, 1, len);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
	if (csum_expected == 0)
		return 0;
	return -EIO;
}
/*
 * when reads are done, we need to check csums to verify the data is correct.
 * if there's a match, we allow the bio to finish.  If not, the code in
 * extent_io.c will try to find good copies for us.
 */
static int btrfs_readpage_end_io_hook(struct btrfs_io_bio *io_bio,
				      u64 phy_offset, struct page *page,
				      u64 start, u64 end, int mirror)
{
	size_t offset = start - page_offset(page);
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (PageChecked(page)) {
		ClearPageChecked(page);
		return 0;
	}

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID &&
	    test_range_bit(io_tree, start, end, EXTENT_NODATASUM, 1, NULL)) {
		clear_extent_bits(io_tree, start, end, EXTENT_NODATASUM);
		return 0;
	}

	phy_offset >>= inode->i_sb->s_blocksize_bits;
	return __readpage_endio_check(inode, io_bio, phy_offset, page, offset,
				      start, (size_t)(end - start + 1));
}
void btrfs_add_delayed_iput(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_inode *binode = BTRFS_I(inode);

	if (atomic_add_unless(&inode->i_count, -1, 1))
		return;

	spin_lock(&fs_info->delayed_iput_lock);
	if (binode->delayed_iput_count == 0) {
		ASSERT(list_empty(&binode->delayed_iput));
		list_add_tail(&binode->delayed_iput, &fs_info->delayed_iputs);
	} else {
		binode->delayed_iput_count++;
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}
void btrfs_run_delayed_iputs(struct btrfs_fs_info *fs_info)
{
	spin_lock(&fs_info->delayed_iput_lock);
	while (!list_empty(&fs_info->delayed_iputs)) {
		struct btrfs_inode *inode;

		inode = list_first_entry(&fs_info->delayed_iputs,
				struct btrfs_inode, delayed_iput);
		if (inode->delayed_iput_count) {
			inode->delayed_iput_count--;
			list_move_tail(&inode->delayed_iput,
					&fs_info->delayed_iputs);
		} else {
			list_del_init(&inode->delayed_iput);
		}
		spin_unlock(&fs_info->delayed_iput_lock);
		iput(&inode->vfs_inode);
		spin_lock(&fs_info->delayed_iput_lock);
	}
	spin_unlock(&fs_info->delayed_iput_lock);
}
/*
 * This is called at transaction commit time. If there are no orphan
 * files in the subvolume, it removes the orphan item and frees the
 * block_rsv structure.
 */
void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_block_rsv *block_rsv;
	int ret;

	if (atomic_read(&root->orphan_inodes) ||
	    root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
		return;

	spin_lock(&root->orphan_lock);
	if (atomic_read(&root->orphan_inodes)) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
		spin_unlock(&root->orphan_lock);
		return;
	}

	block_rsv = root->orphan_block_rsv;
	root->orphan_block_rsv = NULL;
	spin_unlock(&root->orphan_lock);

	if (test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state) &&
	    btrfs_root_refs(&root->root_item) > 0) {
		ret = btrfs_del_orphan_item(trans, fs_info->tree_root,
					    root->root_key.objectid);
		if (ret)
			btrfs_abort_transaction(trans, ret);
		else
			clear_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED,
				  &root->state);
	}

	if (block_rsv) {
		WARN_ON(block_rsv->size > 0);
		btrfs_free_block_rsv(fs_info, block_rsv);
	}
}
/*
 * This creates an orphan entry for the given inode in case something goes
 * wrong in the middle of an unlink/truncate.
 *
 * NOTE: caller of this function should reserve 5 units of metadata for
 *	 this function.
 */
int btrfs_orphan_add(struct btrfs_trans_handle *trans,
		     struct btrfs_inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct btrfs_block_rsv *block_rsv = NULL;
	int reserve = 0;
	int insert = 0;
	int ret;

	if (!root->orphan_block_rsv) {
		block_rsv = btrfs_alloc_block_rsv(fs_info,
						  BTRFS_BLOCK_RSV_TEMP);
		if (!block_rsv)
			return -ENOMEM;
	}

	spin_lock(&root->orphan_lock);
	if (!root->orphan_block_rsv) {
		root->orphan_block_rsv = block_rsv;
	} else if (block_rsv) {
		btrfs_free_block_rsv(fs_info, block_rsv);
		block_rsv = NULL;
	}

	if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
			      &inode->runtime_flags)) {
		/*
		 * For proper ENOSPC handling, we should do orphan
		 * cleanup when mounting. But this introduces backward
		 * compatibility issue.
		 */
		if (!xchg(&root->orphan_item_inserted, 1))
			insert = 2;
		else
			insert = 1;
		atomic_inc(&root->orphan_inodes);
	}

	if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
			      &inode->runtime_flags))
		reserve = 1;
	spin_unlock(&root->orphan_lock);

	/* grab metadata reservation from transaction handle */
	if (reserve) {
		ret = btrfs_orphan_reserve_metadata(trans, inode);
		if (ret) {
			atomic_dec(&root->orphan_inodes);
			clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
				  &inode->runtime_flags);
			if (insert)
				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
					  &inode->runtime_flags);
			return ret;
		}
	}

	/* insert an orphan item to track this unlinked/truncated file */
	if (insert >= 1) {
		ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
		if (ret) {
			atomic_dec(&root->orphan_inodes);
			if (reserve) {
				clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
					  &inode->runtime_flags);
				btrfs_orphan_release_metadata(inode);
			}
			if (ret != -EEXIST) {
				clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
					  &inode->runtime_flags);
				btrfs_abort_transaction(trans, ret);
				return ret;
			}
		}
		ret = 0;
	}

	/* insert an orphan item to track subvolume contains orphan files */
	if (insert >= 2) {
		ret = btrfs_insert_orphan_item(trans, fs_info->tree_root,
					       root->root_key.objectid);
		if (ret && ret != -EEXIST) {
			btrfs_abort_transaction(trans, ret);
			return ret;
		}
	}
	return 0;
}
/*
 * We have done the truncate/delete so we can go ahead and remove the orphan
 * item for this particular inode.
 */
static int btrfs_orphan_del(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	int delete_item = 0;
	int release_rsv = 0;
	int ret = 0;

	spin_lock(&root->orphan_lock);
	if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
			       &inode->runtime_flags))
		delete_item = 1;

	if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
			       &inode->runtime_flags))
		release_rsv = 1;
	spin_unlock(&root->orphan_lock);

	if (delete_item) {
		atomic_dec(&root->orphan_inodes);
		if (trans)
			ret = btrfs_del_orphan_item(trans, root,
						    btrfs_ino(inode));
	}

	if (release_rsv)
		btrfs_orphan_release_metadata(inode);

	return ret;
}
/*
 * this cleans up any orphans that may be left on the list from the last use
 * of this root.
 */
int btrfs_orphan_cleanup(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key, found_key;
	struct btrfs_trans_handle *trans;
	struct inode *inode;
	u64 last_objectid = 0;
	int ret = 0, nr_unlink = 0, nr_truncate = 0;

	if (cmpxchg(&root->orphan_cleanup_state, 0, ORPHAN_CLEANUP_STARTED))
		return 0;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}
	path->reada = READA_BACK;

	key.objectid = BTRFS_ORPHAN_OBJECTID;
	key.type = BTRFS_ORPHAN_ITEM_KEY;
	key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
		if (ret < 0)
			goto out;

		/*
		 * if ret == 0 means we found what we were searching for, which
		 * is weird, but possible, so only screw with path if we didn't
		 * find the key and see if we have stuff that matches
		 */
		if (ret > 0) {
			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		}

		/* pull out the item */
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

		/* make sure the item matches what we want */
		if (found_key.objectid != BTRFS_ORPHAN_OBJECTID)
			break;
		if (found_key.type != BTRFS_ORPHAN_ITEM_KEY)
			break;

		/* release the path since we're done with it */
		btrfs_release_path(path);

		/*
		 * this is where we are basically btrfs_lookup, without the
		 * crossing root thing.  we store the inode number in the
		 * offset of the orphan item.
		 */

		if (found_key.offset == last_objectid) {
			btrfs_err(fs_info,
	"Error removing orphan entry, stopping orphan cleanup");
			ret = -EINVAL;
			goto out;
		}

		last_objectid = found_key.offset;

		found_key.objectid = found_key.offset;
		found_key.type = BTRFS_INODE_ITEM_KEY;
		found_key.offset = 0;
		inode = btrfs_iget(fs_info->sb, &found_key, root, NULL);
		ret = PTR_ERR_OR_ZERO(inode);
		if (ret && ret != -ENOENT)
			goto out;

		if (ret == -ENOENT && root == fs_info->tree_root) {
			struct btrfs_root *dead_root;
			struct btrfs_fs_info *fs_info = root->fs_info;
			int is_dead_root = 0;

			/*
			 * this is an orphan in the tree root. Currently these
			 * could come from 2 sources:
			 *  a) a snapshot deletion in progress
			 *  b) a free space cache inode
			 * We need to distinguish those two, as the snapshot
			 * orphan must not get deleted.
			 * find_dead_roots already ran before us, so if this
			 * is a snapshot deletion, we should find the root
			 * in the dead_roots list
			 */
			spin_lock(&fs_info->trans_lock);
			list_for_each_entry(dead_root, &fs_info->dead_roots,
					    root_list) {
				if (dead_root->root_key.objectid ==
				    found_key.objectid) {
					is_dead_root = 1;
					break;
				}
			}
			spin_unlock(&fs_info->trans_lock);
			if (is_dead_root) {
				/* prevent this orphan from being found again */
				key.offset = found_key.objectid - 1;
				continue;
			}
		}
		/*
		 * Inode is already gone but the orphan item is still there,
		 * kill the orphan item.
		 */
		if (ret == -ENOENT) {
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				goto out;
			}
			btrfs_debug(fs_info, "auto deleting %Lu",
				    found_key.objectid);
			ret = btrfs_del_orphan_item(trans, root,
						    found_key.objectid);
			btrfs_end_transaction(trans);
			if (ret)
				goto out;
			continue;
		}

		/*
		 * add this inode to the orphan list so btrfs_orphan_del does
		 * the proper thing when we hit it
		 */
		set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
			&BTRFS_I(inode)->runtime_flags);
		atomic_inc(&root->orphan_inodes);

		/* if we have links, this was a truncate, lets do that */
		if (inode->i_nlink) {
			if (WARN_ON(!S_ISREG(inode->i_mode))) {
				iput(inode);
				continue;
			}
			nr_truncate++;

			/* 1 for the orphan item deletion. */
			trans = btrfs_start_transaction(root, 1);
			if (IS_ERR(trans)) {
				iput(inode);
				ret = PTR_ERR(trans);
				goto out;
			}
			ret = btrfs_orphan_add(trans, BTRFS_I(inode));
			btrfs_end_transaction(trans);
			if (ret) {
				iput(inode);
				goto out;
			}

			ret = btrfs_truncate(inode);
			if (ret)
				btrfs_orphan_del(NULL, BTRFS_I(inode));
		} else {
			nr_unlink++;
		}

		/* this will do delete_inode and everything for us */
		iput(inode);
		if (ret)
			goto out;
	}
	/* release the path since we're done with it */
	btrfs_release_path(path);

	root->orphan_cleanup_state = ORPHAN_CLEANUP_DONE;

	if (root->orphan_block_rsv)
		btrfs_block_rsv_release(fs_info, root->orphan_block_rsv,
					(u64)-1);

	if (root->orphan_block_rsv ||
	    test_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state)) {
		trans = btrfs_join_transaction(root);
		if (!IS_ERR(trans))
			btrfs_end_transaction(trans);
	}

	if (nr_unlink)
		btrfs_debug(fs_info, "unlinked %d orphans", nr_unlink);
	if (nr_truncate)
		btrfs_debug(fs_info, "truncated %d orphans", nr_truncate);

out:
	if (ret)
		btrfs_err(fs_info, "could not do orphan cleanup %d", ret);
	btrfs_free_path(path);
	return ret;
}
/*
 * very simple check to peek ahead in the leaf looking for xattrs.  If we
 * don't find any xattrs, we know there can't be any acls.
 *
 * slot is the slot the inode is in, objectid is the objectid of the inode
 */
static noinline int acls_after_inode_item(struct extent_buffer *leaf,
					  int slot, u64 objectid,
					  int *first_xattr_slot)
{
	u32 nritems = btrfs_header_nritems(leaf);
	struct btrfs_key found_key;
	static u64 xattr_access = 0;
	static u64 xattr_default = 0;
	int scanned = 0;

	if (!xattr_access) {
		xattr_access = btrfs_name_hash(XATTR_NAME_POSIX_ACL_ACCESS,
					strlen(XATTR_NAME_POSIX_ACL_ACCESS));
		xattr_default = btrfs_name_hash(XATTR_NAME_POSIX_ACL_DEFAULT,
					strlen(XATTR_NAME_POSIX_ACL_DEFAULT));
	}

	slot++;
	*first_xattr_slot = -1;
	while (slot < nritems) {
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* we found a different objectid, there must not be acls */
		if (found_key.objectid != objectid)
			return 0;

		/* we found an xattr, assume we've got an acl */
		if (found_key.type == BTRFS_XATTR_ITEM_KEY) {
			if (*first_xattr_slot == -1)
				*first_xattr_slot = slot;
			if (found_key.offset == xattr_access ||
			    found_key.offset == xattr_default)
				return 1;
		}

		/*
		 * we found a key greater than an xattr key, there can't
		 * be any acls later on
		 */
		if (found_key.type > BTRFS_XATTR_ITEM_KEY)
			return 0;

		slot++;
		scanned++;

		/*
		 * it goes inode, inode backrefs, xattrs, extents,
		 * so if there are a ton of hard links to an inode there can
		 * be a lot of backrefs.  Don't waste time searching too hard,
		 * this is just an optimization
		 */
		if (scanned >= 8)
			break;
	}
	/* we hit the end of the leaf before we found an xattr or
	 * something larger than an xattr.  We have to assume the inode
	 * has acls
	 */
	if (*first_xattr_slot == -1)
		*first_xattr_slot = slot;
	return 1;
}
/*
 * read an inode from the btree into the in-memory inode
 */
static int btrfs_read_locked_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_inode_item *inode_item;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key location;
	unsigned long ptr;
	int maybe_acls;
	u32 rdev;
	int ret;
	bool filled = false;
	int first_xattr_slot;

	ret = btrfs_fill_inode(inode, &rdev);
	if (!ret)
		filled = true;

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto make_bad;
	}

	memcpy(&location, &BTRFS_I(inode)->location, sizeof(location));

	ret = btrfs_lookup_inode(NULL, root, path, &location, 0);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto make_bad;
	}

	leaf = path->nodes[0];

	if (filled)
		goto cache_index;

	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	inode->i_mode = btrfs_inode_mode(leaf, inode_item);
	set_nlink(inode, btrfs_inode_nlink(leaf, inode_item));
	i_uid_write(inode, btrfs_inode_uid(leaf, inode_item));
	i_gid_write(inode, btrfs_inode_gid(leaf, inode_item));
	btrfs_i_size_write(BTRFS_I(inode), btrfs_inode_size(leaf, inode_item));

	inode->i_atime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->atime);
	inode->i_atime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->atime);

	inode->i_mtime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->mtime);
	inode->i_mtime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->mtime);

	inode->i_ctime.tv_sec = btrfs_timespec_sec(leaf, &inode_item->ctime);
	inode->i_ctime.tv_nsec = btrfs_timespec_nsec(leaf, &inode_item->ctime);

	BTRFS_I(inode)->i_otime.tv_sec =
		btrfs_timespec_sec(leaf, &inode_item->otime);
	BTRFS_I(inode)->i_otime.tv_nsec =
		btrfs_timespec_nsec(leaf, &inode_item->otime);

	inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
	BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
	BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);

	inode->i_version = btrfs_inode_sequence(leaf, inode_item);
	inode->i_generation = BTRFS_I(inode)->generation;
	inode->i_rdev = 0;
	rdev = btrfs_inode_rdev(leaf, inode_item);

	BTRFS_I(inode)->index_cnt = (u64)-1;
	BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);

cache_index:
	/*
	 * If we were modified in the current generation and evicted from
	 * memory and then re-read we need to do a full sync since we don't
	 * have any idea about which extents were modified before we were
	 * evicted from cache.
	 *
	 * This is required for both inode re-read from disk and delayed inode
	 * in delayed_nodes_tree.
	 */
	if (BTRFS_I(inode)->last_trans == fs_info->generation)
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);

	/*
	 * We don't persist the id of the transaction where an unlink operation
	 * against the inode was last made. So here we assume the inode might
	 * have been evicted, and therefore the exact value of last_unlink_trans
	 * lost, and set it to last_trans to avoid metadata inconsistencies
	 * between the inode and its parent if the inode is fsync'ed and the log
	 * replayed. For example, in the scenario:
	 *
	 *   touch mydir/foo
	 *   ln mydir/foo mydir/bar
	 *   sync
	 *   unlink mydir/bar
	 *   echo 2 > /proc/sys/vm/drop_caches   # evicts inode
	 *   xfs_io -c fsync mydir/foo
	 *   <power failure>
	 *   mount fs, triggers fsync log replay
	 *
	 * We must make sure that when we fsync our inode foo we also log its
	 * parent inode, otherwise after log replay the parent still has the
	 * dentry with the "bar" name but our inode foo has a link count of 1
	 * and doesn't have an inode ref with the name "bar" anymore.
	 *
	 * Setting last_unlink_trans to last_trans is a pessimistic approach,
	 * but it guarantees correctness at the expense of occasional full
	 * transaction commits on fsync if our inode is a directory, or if our
	 * inode is not a directory, logging its parent unnecessarily.
	 */
	BTRFS_I(inode)->last_unlink_trans = BTRFS_I(inode)->last_trans;

	path->slots[0]++;
	if (inode->i_nlink != 1 ||
	    path->slots[0] >= btrfs_header_nritems(leaf))
		goto cache_acl;

	btrfs_item_key_to_cpu(leaf, &location, path->slots[0]);
	if (location.objectid != btrfs_ino(BTRFS_I(inode)))
		goto cache_acl;

	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
	if (location.type == BTRFS_INODE_REF_KEY) {
		struct btrfs_inode_ref *ref;

		ref = (struct btrfs_inode_ref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_ref_index(leaf, ref);
	} else if (location.type == BTRFS_INODE_EXTREF_KEY) {
		struct btrfs_inode_extref *extref;

		extref = (struct btrfs_inode_extref *)ptr;
		BTRFS_I(inode)->dir_index = btrfs_inode_extref_index(leaf,
								     extref);
	}
cache_acl:
	/*
	 * try to precache a NULL acl entry for files that don't have
	 * any xattrs or acls
	 */
	maybe_acls = acls_after_inode_item(leaf, path->slots[0],
			btrfs_ino(BTRFS_I(inode)), &first_xattr_slot);
	if (first_xattr_slot != -1) {
		path->slots[0] = first_xattr_slot;
		ret = btrfs_load_inode_props(inode, path);
		if (ret)
			btrfs_err(fs_info,
				  "error loading props for ino %llu (root %llu): %d",
				  btrfs_ino(BTRFS_I(inode)),
				  root->root_key.objectid, ret);
	}
	btrfs_free_path(path);

	if (!maybe_acls)
		cache_no_acl(inode);

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		inode->i_mapping->a_ops = &btrfs_aops;
		BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
		inode->i_fop = &btrfs_file_operations;
		inode->i_op = &btrfs_file_inode_operations;
		break;
	case S_IFDIR:
		inode->i_fop = &btrfs_dir_file_operations;
		inode->i_op = &btrfs_dir_inode_operations;
		break;
	case S_IFLNK:
		inode->i_op = &btrfs_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mapping->a_ops = &btrfs_symlink_aops;
		break;
	default:
		inode->i_op = &btrfs_special_inode_operations;
		init_special_inode(inode, inode->i_mode, rdev);
		break;
	}

	btrfs_update_iflags(inode);
	return 0;

make_bad:
	btrfs_free_path(path);
	make_bad_inode(inode);
	return ret;
}
/*
 * given a leaf and an inode, copy the inode fields into the leaf
 */
static void fill_inode_item(struct btrfs_trans_handle *trans,
			    struct extent_buffer *leaf,
			    struct btrfs_inode_item *item,
			    struct inode *inode)
{
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	btrfs_set_token_inode_uid(leaf, item, i_uid_read(inode), &token);
	btrfs_set_token_inode_gid(leaf, item, i_gid_read(inode), &token);
	btrfs_set_token_inode_size(leaf, item, BTRFS_I(inode)->disk_i_size,
				   &token);
	btrfs_set_token_inode_mode(leaf, item, inode->i_mode, &token);
	btrfs_set_token_inode_nlink(leaf, item, inode->i_nlink, &token);

	btrfs_set_token_timespec_sec(leaf, &item->atime,
				     inode->i_atime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->atime,
				      inode->i_atime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->mtime,
				     inode->i_mtime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->mtime,
				      inode->i_mtime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->ctime,
				     inode->i_ctime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->ctime,
				      inode->i_ctime.tv_nsec, &token);

	btrfs_set_token_timespec_sec(leaf, &item->otime,
				     BTRFS_I(inode)->i_otime.tv_sec, &token);
	btrfs_set_token_timespec_nsec(leaf, &item->otime,
				      BTRFS_I(inode)->i_otime.tv_nsec, &token);

	btrfs_set_token_inode_nbytes(leaf, item, inode_get_bytes(inode),
				     &token);
	btrfs_set_token_inode_generation(leaf, item,
					 BTRFS_I(inode)->generation, &token);
	btrfs_set_token_inode_sequence(leaf, item, inode->i_version, &token);
	btrfs_set_token_inode_transid(leaf, item, trans->transid, &token);
	btrfs_set_token_inode_rdev(leaf, item, inode->i_rdev, &token);
	btrfs_set_token_inode_flags(leaf, item, BTRFS_I(inode)->flags, &token);
	btrfs_set_token_inode_block_group(leaf, item, 0, &token);
}
/*
 * copy everything in the in-memory inode into the btree.
 */
static noinline int btrfs_update_inode_item(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_inode_item *inode_item;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->leave_spinning = 1;
	ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location,
				 1);
	if (ret) {
		if (ret > 0)
			ret = -ENOENT;
		goto failed;
	}

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);

	fill_inode_item(trans, leaf, inode_item, inode);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_set_inode_last_trans(trans, inode);
	ret = 0;
failed:
	btrfs_free_path(path);
	return ret;
}
/*
 * copy everything in the in-memory inode into the btree.
 */
noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, struct inode *inode)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * If the inode is a free space inode, we can deadlock during commit
	 * if we put it into the delayed code.
	 *
	 * The data relocation inode should also be directly updated
	 * without delay
	 */
	if (!btrfs_is_free_space_inode(BTRFS_I(inode))
	    && root->root_key.objectid != BTRFS_DATA_RELOC_TREE_OBJECTID
	    && !test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		btrfs_update_root_times(trans, root);

		ret = btrfs_delayed_update_inode(trans, root, inode);
		if (!ret)
			btrfs_set_inode_last_trans(trans, inode);
		return ret;
	}

	return btrfs_update_inode_item(trans, root, inode);
}
noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root,
					 struct inode *inode)
{
	int ret;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret == -ENOSPC)
		return btrfs_update_inode_item(trans, root, inode);
	return ret;
}
/*
 * unlink helper that gets used here in inode.c and in the tree logging
 * recovery code.  It removes a link in a directory with a given name, and
 * also drops the back refs in the inode to the directory
 */
static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_inode *dir,
				struct btrfs_inode *inode,
				const char *name, int name_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	int ret = 0;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	u64 ino = btrfs_ino(inode);
	u64 dir_ino = btrfs_ino(dir);

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	path->leave_spinning = 1;
	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR(di)) {
		ret = PTR_ERR(di);
		goto err;
	}
	if (!di) {
		ret = -ENOENT;
		goto err;
	}
	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret)
		goto err;
	btrfs_release_path(path);

	/*
	 * If we don't have dir index, we have to get it by looking up
	 * the inode ref, since we get the inode ref, remove it directly,
	 * it is unnecessary to do delayed deletion.
	 *
	 * But if we have dir index, needn't search inode ref to get it.
	 * Since the inode ref is close to the inode item, it is better
	 * that we delay to delete it, and just do this deletion when
	 * we update the inode item.
	 */
	if (inode->dir_index) {
		ret = btrfs_delayed_delete_inode_ref(inode);
		if (!ret) {
			index = inode->dir_index;
			goto skip_backref;
		}
	}

	ret = btrfs_del_inode_ref(trans, root, name, name_len, ino,
				  dir_ino, &index);
	if (ret) {
		btrfs_info(fs_info,
			"failed to delete reference to %.*s, inode %llu parent %llu",
			name_len, name, ino, dir_ino);
		btrfs_abort_transaction(trans, ret);
		goto err;
	}
skip_backref:
	ret = btrfs_delete_delayed_dir_index(trans, fs_info, dir, index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, inode,
					 dir_ino);
	if (ret != 0 && ret != -ENOENT) {
		btrfs_abort_transaction(trans, ret);
		goto err;
	}

	ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, dir,
					   index);
	if (ret == -ENOENT)
		ret = 0;
	else if (ret)
		btrfs_abort_transaction(trans, ret);
err:
	btrfs_free_path(path);
	if (ret)
		goto out;

	btrfs_i_size_write(dir, dir->vfs_inode.i_size - name_len * 2);
	inode_inc_iversion(&inode->vfs_inode);
	inode_inc_iversion(&dir->vfs_inode);
	inode->vfs_inode.i_ctime = dir->vfs_inode.i_mtime =
		dir->vfs_inode.i_ctime = current_time(&inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, &dir->vfs_inode);
out:
	return ret;
}
int btrfs_unlink_inode(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root,
		       struct btrfs_inode *dir, struct btrfs_inode *inode,
		       const char *name, int name_len)
{
	int ret;

	ret = __btrfs_unlink_inode(trans, root, dir, inode, name, name_len);
	if (!ret) {
		drop_nlink(&inode->vfs_inode);
		ret = btrfs_update_inode(trans, root, &inode->vfs_inode);
	}
	return ret;
}
/*
 * helper to start transaction for unlink and rmdir.
 *
 * unlink and rmdir are special in btrfs, they do not always free space, so
 * if we cannot make our reservations the normal way try and see if there is
 * plenty of slack room in the global reserve to migrate, otherwise we cannot
 * allow the unlink to occur.
 */
static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;

	/*
	 * 1 for the possible orphan item
	 * 1 for the dir item
	 * 1 for the dir index
	 * 1 for the inode ref
	 * 1 for the inode
	 */
	return btrfs_start_transaction_fallback_global_rsv(root, 5, 5);
}
static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	struct inode *inode = d_inode(dentry);
	int ret;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	btrfs_record_unlink_dir(trans, BTRFS_I(dir), BTRFS_I(d_inode(dentry)),
				0);

	ret = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
			dentry->d_name.len);
	if (ret)
		goto out;

	if (inode->i_nlink == 0) {
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		if (ret)
			goto out;
	}

out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(root->fs_info);
	return ret;
}
int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
			struct btrfs_root *root,
			struct inode *dir, u64 objectid,
			const char *name, int name_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	u64 index;
	int ret;
	u64 dir_ino = btrfs_ino(BTRFS_I(dir));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(trans, root, path, dir_ino,
				   name, name_len, -1);
	if (IS_ERR_OR_NULL(di)) {
		if (!di)
			ret = -ENOENT;
		else
			ret = PTR_ERR(di);
		goto out;
	}

	leaf = path->nodes[0];
	btrfs_dir_item_key_to_cpu(leaf, di, &key);
	WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid);
	ret = btrfs_delete_one_dir_name(trans, root, path, di);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_del_root_ref(trans, fs_info, objectid,
				 root->root_key.objectid, dir_ino,
				 &index, name, name_len);
	if (ret < 0) {
		if (ret != -ENOENT) {
			btrfs_abort_transaction(trans, ret);
			goto out;
		}
		di = btrfs_search_dir_index_item(root, path, dir_ino,
						 name, name_len);
		if (IS_ERR_OR_NULL(di)) {
			if (!di)
				ret = -ENOENT;
			else
				ret = PTR_ERR(di);
			btrfs_abort_transaction(trans, ret);
			goto out;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		btrfs_release_path(path);
		index = key.offset;
	}
	btrfs_release_path(path);

	ret = btrfs_delete_delayed_dir_index(trans, fs_info, BTRFS_I(dir),
					     index);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out;
	}

	btrfs_i_size_write(BTRFS_I(dir), dir->i_size - name_len * 2);
	inode_inc_iversion(dir);
	dir->i_mtime = dir->i_ctime = current_time(dir);
	ret = btrfs_update_inode_fallback(trans, root, dir);
	if (ret)
		btrfs_abort_transaction(trans, ret);
out:
	btrfs_free_path(path);
	return ret;
}
static int btrfs_rmdir(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = d_inode(dentry);
	int err = 0;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_trans_handle *trans;
	u64 last_unlink_trans;

	if (inode->i_size > BTRFS_EMPTY_DIR_SIZE)
		return -ENOTEMPTY;
	if (btrfs_ino(BTRFS_I(inode)) == BTRFS_FIRST_FREE_OBJECTID)
		return -EPERM;

	trans = __unlink_start_trans(dir);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	if (unlikely(btrfs_ino(BTRFS_I(inode)) ==
		     BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
		err = btrfs_unlink_subvol(trans, root, dir,
					  BTRFS_I(inode)->location.objectid,
					  dentry->d_name.name,
					  dentry->d_name.len);
		goto out;
	}

	err = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (err)
		goto out;

	last_unlink_trans = BTRFS_I(inode)->last_unlink_trans;

	/* now the directory is empty */
	err = btrfs_unlink_inode(trans, root, BTRFS_I(dir),
			BTRFS_I(d_inode(dentry)), dentry->d_name.name,
			dentry->d_name.len);
	if (!err) {
		btrfs_i_size_write(BTRFS_I(inode), 0);
		/*
		 * Propagate the last_unlink_trans value of the deleted dir to
		 * its parent directory. This is to prevent an unrecoverable
		 * log tree in the case we do something like this:
		 * 1) create dir foo
		 * 2) create snapshot under dir foo
		 * 3) delete the snapshot
		 * 4) rmdir foo
		 * 5) mkdir foo
		 * 6) fsync foo or some file inside foo
		 */
		if (last_unlink_trans >= trans->transid)
			BTRFS_I(dir)->last_unlink_trans = last_unlink_trans;
	}
out:
	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(root->fs_info);

	return err;
}
static int truncate_space_check(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				u64 bytes_deleted)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	/*
	 * This is only used to apply pressure to the enospc system, we don't
	 * intend to use this reservation at all.
	 */
	bytes_deleted = btrfs_csum_bytes_to_leaves(fs_info, bytes_deleted);
	bytes_deleted *= fs_info->nodesize;
	ret = btrfs_block_rsv_add(root, &fs_info->trans_block_rsv,
				  bytes_deleted, BTRFS_RESERVE_NO_FLUSH);
	if (!ret) {
		trace_btrfs_space_reservation(fs_info, "transaction",
					      trans->transid,
					      bytes_deleted, 1);
		trans->bytes_reserved += bytes_deleted;
	}
	return ret;
}
static int truncate_inline_extent(struct inode *inode,
				  struct btrfs_path *path,
				  struct btrfs_key *found_key,
				  const u64 item_end,
				  const u64 new_size)
{
	struct extent_buffer *leaf = path->nodes[0];
	int slot = path->slots[0];
	struct btrfs_file_extent_item *fi;
	u32 size = (u32)(new_size - found_key->offset);
	struct btrfs_root *root = BTRFS_I(inode)->root;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_compression(leaf, fi) != BTRFS_COMPRESS_NONE) {
		loff_t offset = new_size;
		loff_t page_end = ALIGN(offset, PAGE_SIZE);

		/*
		 * Zero out the remaining of the last page of our inline extent,
		 * instead of directly truncating our inline extent here - that
		 * would be much more complex (decompressing all the data, then
		 * compressing the truncated data, which might be bigger than
		 * the size of the inline extent, resize the extent, etc).
		 * We release the path because to get the page we might need to
		 * read the extent item from disk (data not in the page cache).
		 */
		btrfs_release_path(path);
		return btrfs_truncate_block(inode, offset, page_end - offset,
					    0);
	}

	btrfs_set_file_extent_ram_bytes(leaf, fi, size);
	size = btrfs_file_extent_calc_inline_size(size);
	btrfs_truncate_item(root->fs_info, path, size, 1);

	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		inode_sub_bytes(inode, item_end + 1 - new_size);

	return 0;
}
4275 * this can truncate away extent items, csum items and directory items.
4276 * It starts at a high offset and removes keys until it can't find
4277 * any higher than new_size
4279 * csum items that cross the new i_size are truncated to the new size
4282 * min_type is the minimum key type to truncate down to. If set to 0, this
4283 * will kill all the items on this inode, including the INODE_ITEM_KEY.
4285 int btrfs_truncate_inode_items(struct btrfs_trans_handle
*trans
,
4286 struct btrfs_root
*root
,
4287 struct inode
*inode
,
			       u64 new_size, u32 min_type)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u64 extent_start = 0;
	u64 extent_num_bytes = 0;
	u64 extent_offset = 0;
	u64 item_end = 0;
	u64 last_size = new_size;
	u32 found_type = (u8)-1;
	int found_extent;
	int del_item;
	int pending_del_nr = 0;
	int pending_del_slot = 0;
	int extent_type = -1;
	int ret;
	int err = 0;
	u64 ino = btrfs_ino(BTRFS_I(inode));
	u64 bytes_deleted = 0;
	bool be_nice = 0;
	bool should_throttle = 0;
	bool should_end = 0;

	BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);

	/*
	 * for non-free space inodes and ref cows, we want to back off from
	 * time to time
	 */
	if (!btrfs_is_free_space_inode(BTRFS_I(inode)) &&
	    test_bit(BTRFS_ROOT_REF_COWS, &root->state))
		be_nice = 1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->reada = READA_BACK;

	/*
	 * We want to drop from the next block forward in case this new size is
	 * not block aligned since we will be keeping the last block of the
	 * extent just the way it is.
	 */
	if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
	    root == fs_info->tree_root)
		btrfs_drop_extent_cache(BTRFS_I(inode), ALIGN(new_size,
					fs_info->sectorsize),
					(u64)-1, 0);

	/*
	 * This function is also used to drop the items in the log tree before
	 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
	 * it is used to drop the logged items. So we shouldn't kill the
	 * delayed items.
	 */
	if (min_type == 0 && root == BTRFS_I(inode)->root)
		btrfs_kill_delayed_inode_items(BTRFS_I(inode));

	key.objectid = ino;
	key.offset = (u64)-1;
	key.type = (u8)-1;

search_again:
	/*
	 * with a 16K leaf size and 128MB extents, you can actually queue
	 * up a huge file in a single leaf.  Most of the time that
	 * bytes_deleted is > 0, it will be huge by the time we get here
	 */
	if (be_nice && bytes_deleted > SZ_32M) {
		if (btrfs_should_end_transaction(trans)) {
			err = -EAGAIN;
			goto error;
		}
	}

	path->leave_spinning = 1;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret > 0) {
		/* there are no items in the tree for us to truncate, we're
		 * done
		 */
		if (path->slots[0] == 0)
			goto out;
		path->slots[0]--;
	}

	while (1) {
		fi = NULL;
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		found_type = found_key.type;

		if (found_key.objectid != ino)
			break;

		if (found_type < min_type)
			break;

		item_end = found_key.offset;
		if (found_type == BTRFS_EXTENT_DATA_KEY) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			extent_type = btrfs_file_extent_type(leaf, fi);
			if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
				item_end +=
				    btrfs_file_extent_num_bytes(leaf, fi);
			} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
				item_end += btrfs_file_extent_inline_len(leaf,
							 path->slots[0], fi);
			}
			item_end--;
		}
		if (found_type > min_type) {
			del_item = 1;
		} else {
			if (item_end < new_size)
				break;
			if (found_key.offset >= new_size)
				del_item = 1;
			else
				del_item = 0;
		}
		found_extent = 0;
		/* FIXME, shrink the extent if the ref count is only 1 */
		if (found_type != BTRFS_EXTENT_DATA_KEY)
			goto delete;

		if (del_item)
			last_size = found_key.offset;
		else
			last_size = new_size;

		if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
			u64 num_dec;
			extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
			if (!del_item) {
				u64 orig_num_bytes =
					btrfs_file_extent_num_bytes(leaf, fi);
				extent_num_bytes = ALIGN(new_size -
						found_key.offset,
						fs_info->sectorsize);
				btrfs_set_file_extent_num_bytes(leaf, fi,
							 extent_num_bytes);
				num_dec = (orig_num_bytes -
					   extent_num_bytes);
				if (test_bit(BTRFS_ROOT_REF_COWS,
					     &root->state) &&
				    extent_start != 0)
					inode_sub_bytes(inode, num_dec);
				btrfs_mark_buffer_dirty(leaf);
			} else {
				extent_num_bytes =
					btrfs_file_extent_disk_num_bytes(leaf,
									 fi);
				extent_offset = found_key.offset -
					btrfs_file_extent_offset(leaf, fi);

				/* FIXME blocksize != 4096 */
				num_dec = btrfs_file_extent_num_bytes(leaf, fi);
				if (extent_start != 0) {
					found_extent = 1;
					if (test_bit(BTRFS_ROOT_REF_COWS,
						     &root->state))
						inode_sub_bytes(inode, num_dec);
				}
			}
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			/*
			 * we can't truncate inline items that have had
			 * special encodings
			 */
			if (!del_item &&
			    btrfs_file_extent_encryption(leaf, fi) == 0 &&
			    btrfs_file_extent_other_encoding(leaf, fi) == 0) {

				/*
				 * Need to release path in order to truncate a
				 * compressed extent. So delete any accumulated
				 * extent items so far.
				 */
				if (btrfs_file_extent_compression(leaf, fi) !=
				    BTRFS_COMPRESS_NONE && pending_del_nr) {
					err = btrfs_del_items(trans, root, path,
							      pending_del_slot,
							      pending_del_nr);
					if (err) {
						btrfs_abort_transaction(trans,
									err);
						goto error;
					}
					pending_del_nr = 0;
				}

				err = truncate_inline_extent(inode, path,
							     &found_key,
							     item_end,
							     new_size);
				if (err) {
					btrfs_abort_transaction(trans, err);
					goto error;
				}
			} else if (test_bit(BTRFS_ROOT_REF_COWS,
					    &root->state)) {
				inode_sub_bytes(inode, item_end + 1 - new_size);
			}
		}
delete:
		if (del_item) {
			if (!pending_del_nr) {
				/* no pending yet, add ourselves */
				pending_del_slot = path->slots[0];
				pending_del_nr = 1;
			} else if (pending_del_nr &&
				   path->slots[0] + 1 == pending_del_slot) {
				/* hop on the pending chunk */
				pending_del_nr++;
				pending_del_slot = path->slots[0];
			} else {
				BUG();
			}
		} else {
			break;
		}
		should_throttle = 0;

		if (found_extent &&
		    (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
		     root == fs_info->tree_root)) {
			btrfs_set_path_blocking(path);
			bytes_deleted += extent_num_bytes;
			ret = btrfs_free_extent(trans, fs_info, extent_start,
						extent_num_bytes, 0,
						btrfs_header_owner(leaf),
						ino, extent_offset);
			BUG_ON(ret);
			if (btrfs_should_throttle_delayed_refs(trans, fs_info))
				btrfs_async_run_delayed_refs(fs_info,
					trans->delayed_ref_updates * 2,
					trans->transid, 0);
			if (be_nice) {
				if (truncate_space_check(trans, root,
							 extent_num_bytes)) {
					should_end = 1;
				}
				if (btrfs_should_throttle_delayed_refs(trans,
								       fs_info))
					should_throttle = 1;
			}
		}

		if (found_type == BTRFS_INODE_ITEM_KEY)
			break;

		if (path->slots[0] == 0 ||
		    path->slots[0] != pending_del_slot ||
		    should_throttle || should_end) {
			if (pending_del_nr) {
				ret = btrfs_del_items(trans, root, path,
						pending_del_slot,
						pending_del_nr);
				if (ret) {
					btrfs_abort_transaction(trans, ret);
					goto error;
				}
				pending_del_nr = 0;
			}
			btrfs_release_path(path);
			if (should_throttle) {
				unsigned long updates = trans->delayed_ref_updates;
				if (updates) {
					trans->delayed_ref_updates = 0;
					ret = btrfs_run_delayed_refs(trans,
								   fs_info,
								   updates * 2);
					if (ret && !err)
						err = ret;
				}
			}
			/*
			 * if we failed to refill our space rsv, bail out
			 * and let the transaction restart
			 */
			if (should_end) {
				err = -EAGAIN;
				goto error;
			}
			goto search_again;
		} else {
			path->slots[0]--;
		}
	}
out:
	if (pending_del_nr) {
		ret = btrfs_del_items(trans, root, path, pending_del_slot,
				      pending_del_nr);
		if (ret)
			btrfs_abort_transaction(trans, ret);
	}
error:
	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
		ASSERT(last_size >= new_size);
		if (!err && last_size > new_size)
			last_size = new_size;
		btrfs_ordered_update_i_size(inode, last_size, NULL);
	}

	btrfs_free_path(path);

	if (err == 0) {
		/* only inline file may have last_size != new_size */
		if (new_size >= fs_info->sectorsize ||
		    new_size > fs_info->max_inline)
			ASSERT(last_size == new_size);
	}

	if (be_nice && bytes_deleted > SZ_32M) {
		unsigned long updates = trans->delayed_ref_updates;
		if (updates) {
			trans->delayed_ref_updates = 0;
			ret = btrfs_run_delayed_refs(trans, fs_info,
						     updates * 2);
			if (ret && !err)
				err = ret;
		}
	}
	return err;
}
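/*
 * Note: callers of this truncate helper are expected to retry on
 * -EAGAIN/-ENOSPC (see the loop in btrfs_evict_inode() below); the
 * "be nice" checks above deliberately bail out of huge truncates so the
 * transaction can be restarted.
 */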
/*
 * btrfs_truncate_block - read, zero a chunk and write a block
 * @inode - inode that we're zeroing
 * @from - the offset to start zeroing
 * @len - the length to zero, 0 to zero the entire range respective to the
 *	offset
 * @front - zero up to the offset instead of from the offset on
 *
 * This will find the block for the "from" offset and cow the block and zero the
 * part we want to zero.  This is used with truncate and hole punching.
 */
int btrfs_truncate_block(struct inode *inode, loff_t from, loff_t len,
			int front)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	u32 blocksize = fs_info->sectorsize;
	pgoff_t index = from >> PAGE_SHIFT;
	unsigned offset = from & (blocksize - 1);
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(mapping);
	int ret = 0;
	u64 block_start;
	u64 block_end;

	if ((offset & (blocksize - 1)) == 0 &&
	    (!len || ((len & (blocksize - 1)) == 0)))
		goto out;

	ret = btrfs_delalloc_reserve_space(inode,
			round_down(from, blocksize), blocksize);
	if (ret)
		goto out;

again:
	page = find_or_create_page(mapping, index, mask);
	if (!page) {
		btrfs_delalloc_release_space(inode,
				round_down(from, blocksize),
				blocksize);
		ret = -ENOMEM;
		goto out;
	}

	block_start = round_down(from, blocksize);
	block_end = block_start + blocksize - 1;

	if (!PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		lock_page(page);
		if (page->mapping != mapping) {
			unlock_page(page);
			put_page(page);
			goto again;
		}
		if (!PageUptodate(page)) {
			ret = -EIO;
			goto out_unlock;
		}
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, block_start, block_end, &cached_state);
	set_page_extent_mapped(page);

	ordered = btrfs_lookup_ordered_extent(inode, block_start);
	if (ordered) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		put_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	clear_extent_bit(&BTRFS_I(inode)->io_tree, block_start, block_end,
			  EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
					&cached_state, 0);
	if (ret) {
		unlock_extent_cached(io_tree, block_start, block_end,
				     &cached_state, GFP_NOFS);
		goto out_unlock;
	}

	if (offset != blocksize) {
		if (!len)
			len = blocksize - offset;
		kaddr = kmap(page);
		if (front)
			memset(kaddr + (block_start - page_offset(page)),
				0, offset);
		else
			memset(kaddr + (block_start - page_offset(page)) + offset,
				0, len);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	unlock_extent_cached(io_tree, block_start, block_end, &cached_state,
			     GFP_NOFS);

out_unlock:
	if (ret)
		btrfs_delalloc_release_space(inode, block_start,
					     blocksize);
	unlock_page(page);
	put_page(page);
out:
	return ret;
}
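/*
 * Helper for btrfs_cont_expand(): insert a file extent item describing a
 * hole of @len bytes at @offset.  With the NO_HOLES incompat feature no
 * item is needed, but the in-memory inode is still marked updated so the
 * hole gets logged on fsync.
 */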
static int maybe_insert_hole(struct btrfs_root *root, struct inode *inode,
			     u64 offset, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	int ret;

	/*
	 * Still need to make sure the inode looks like it's been updated so
	 * that any holes get logged if we fsync.
	 */
	if (btrfs_fs_incompat(fs_info, NO_HOLES)) {
		BTRFS_I(inode)->last_trans = fs_info->generation;
		BTRFS_I(inode)->last_sub_trans = root->log_transid;
		BTRFS_I(inode)->last_log_commit = root->last_log_commit;
		return 0;
	}

	/*
	 * 1 - for the one we're dropping
	 * 1 - for the one we're adding
	 * 1 - for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_drop_extents(trans, root, inode, offset, offset + len, 1);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		btrfs_end_transaction(trans);
		return ret;
	}

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(BTRFS_I(inode)),
				       offset, 0, 0, len, 0, len, 0, 0, 0);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	else
		btrfs_update_inode(trans, root, inode);
	btrfs_end_transaction(trans);
	return ret;
}
/*
 * This function puts in dummy file extents for the area we're creating a hole
 * for.  So if we are truncating this file to a larger size we need to insert
 * these file extents so that btrfs_get_extent will return an EXTENT_MAP_HOLE
 * for the range between oldsize and size.
 */
int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map *em = NULL;
	struct extent_state *cached_state = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 hole_start = ALIGN(oldsize, fs_info->sectorsize);
	u64 block_end = ALIGN(size, fs_info->sectorsize);
	u64 last_byte;
	u64 cur_offset;
	u64 hole_size;
	int err = 0;

	/*
	 * If our size started in the middle of a block we need to zero out the
	 * rest of the block before we expand the i_size, otherwise we could
	 * expose stale data.
	 */
	err = btrfs_truncate_block(inode, oldsize, 0, 0);
	if (err)
		return err;

	if (size <= hole_start)
		return 0;

	while (1) {
		struct btrfs_ordered_extent *ordered;

		lock_extent_bits(io_tree, hole_start, block_end - 1,
				 &cached_state);
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), hole_start,
						     block_end - hole_start);
		if (!ordered)
			break;
		unlock_extent_cached(io_tree, hole_start, block_end - 1,
				     &cached_state, GFP_NOFS);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
	}

	cur_offset = hole_start;
	while (1) {
		em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, cur_offset,
				block_end - cur_offset, 0);
		if (IS_ERR(em)) {
			err = PTR_ERR(em);
			em = NULL;
			break;
		}
		last_byte = min(extent_map_end(em), block_end);
		last_byte = ALIGN(last_byte, fs_info->sectorsize);
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
			struct extent_map *hole_em;
			hole_size = last_byte - cur_offset;

			err = maybe_insert_hole(root, inode, cur_offset,
						hole_size);
			if (err)
				break;
			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
						cur_offset + hole_size - 1, 0);
			hole_em = alloc_extent_map();
			if (!hole_em) {
				set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
					&BTRFS_I(inode)->runtime_flags);
				goto next;
			}
			hole_em->start = cur_offset;
			hole_em->len = hole_size;
			hole_em->orig_start = cur_offset;

			hole_em->block_start = EXTENT_MAP_HOLE;
			hole_em->block_len = 0;
			hole_em->orig_block_len = 0;
			hole_em->ram_bytes = hole_size;
			hole_em->bdev = fs_info->fs_devices->latest_bdev;
			hole_em->compress_type = BTRFS_COMPRESS_NONE;
			hole_em->generation = fs_info->generation;

			while (1) {
				write_lock(&em_tree->lock);
				err = add_extent_mapping(em_tree, hole_em, 1);
				write_unlock(&em_tree->lock);
				if (err != -EEXIST)
					break;
				btrfs_drop_extent_cache(BTRFS_I(inode),
							cur_offset,
							cur_offset +
							hole_size - 1, 0);
			}
			free_extent_map(hole_em);
		}
next:
		free_extent_map(em);
		em = NULL;
		cur_offset = last_byte;
		if (cur_offset >= block_end)
			break;
	}
	free_extent_map(em);
	unlock_extent_cached(io_tree, hole_start, block_end - 1, &cached_state,
			     GFP_NOFS);
	return err;
}
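/*
 * btrfs_setsize() handles both directions of a size change: growing goes
 * through btrfs_cont_expand() to insert hole extents, while shrinking adds
 * an orphan item first so that a crash in the middle of the truncate can
 * be cleaned up on the next mount.
 */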
static int btrfs_setsize(struct inode *inode, struct iattr *attr)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	loff_t oldsize = i_size_read(inode);
	loff_t newsize = attr->ia_size;
	int mask = attr->ia_valid;
	int ret;

	/*
	 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME is a
	 * special case where we need to update the times despite not having
	 * these flags set.  For all other operations the VFS set these flags
	 * explicitly if it wants a timestamp update.
	 */
	if (newsize != oldsize) {
		inode_inc_iversion(inode);
		if (!(mask & (ATTR_CTIME | ATTR_MTIME)))
			inode->i_ctime = inode->i_mtime =
				current_time(inode);
	}

	if (newsize > oldsize) {
		/*
		 * Don't do an expanding truncate while snapshotting is ongoing.
		 * This is to ensure the snapshot captures a fully consistent
		 * state of this file - if the snapshot captures this expanding
		 * truncation, it must capture all writes that happened before
		 * this truncation.
		 */
		btrfs_wait_for_snapshot_creation(root);
		ret = btrfs_cont_expand(inode, oldsize, newsize);
		if (ret) {
			btrfs_end_write_no_snapshoting(root);
			return ret;
		}

		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans)) {
			btrfs_end_write_no_snapshoting(root);
			return PTR_ERR(trans);
		}

		i_size_write(inode, newsize);
		btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
		pagecache_isize_extended(inode, oldsize, newsize);
		ret = btrfs_update_inode(trans, root, inode);
		btrfs_end_write_no_snapshoting(root);
		btrfs_end_transaction(trans);
	} else {

		/*
		 * We're truncating a file that used to have good data down to
		 * zero. Make sure it gets into the ordered flush list so that
		 * any new writes get down to disk quickly.
		 */
		if (newsize == 0)
			set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
				&BTRFS_I(inode)->runtime_flags);

		/*
		 * 1 for the orphan item we're going to add
		 * 1 for the orphan item deletion.
		 */
		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		/*
		 * We need to do this in case we fail at _any_ point during the
		 * actual truncate.  Once we do the truncate_setsize we could
		 * invalidate pages which forces any outstanding ordered io to
		 * be instantly completed which will give us extents that need
		 * to be truncated.  If we fail to get an orphan inode down we
		 * could have left over extents that were never meant to live,
		 * so we need to guarantee from this point on that everything
		 * will be consistent.
		 */
		ret = btrfs_orphan_add(trans, BTRFS_I(inode));
		btrfs_end_transaction(trans);
		if (ret)
			return ret;

		/* we don't support swapfiles, so vmtruncate shouldn't fail */
		truncate_setsize(inode, newsize);

		/* Disable nonlocked read DIO to avoid an endless truncate */
		btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
		inode_dio_wait(inode);
		btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));

		ret = btrfs_truncate(inode);
		if (ret && inode->i_nlink) {
			int err;

			/* To get a stable disk_i_size */
			err = btrfs_wait_ordered_range(inode, 0, (u64)-1);
			if (err) {
				btrfs_orphan_del(NULL, BTRFS_I(inode));
				return err;
			}

			/*
			 * failed to truncate, disk_i_size is only adjusted down
			 * as we remove extents, so it should represent the true
			 * size of the inode, so reset the in memory size and
			 * delete our orphan entry.
			 */
			trans = btrfs_join_transaction(root);
			if (IS_ERR(trans)) {
				btrfs_orphan_del(NULL, BTRFS_I(inode));
				return ret;
			}
			i_size_write(inode, BTRFS_I(inode)->disk_i_size);
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				btrfs_abort_transaction(trans, err);
			btrfs_end_transaction(trans);
		}
	}

	return ret;
}
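/*
 * ->setattr callback: size changes are routed through btrfs_setsize(),
 * everything else is copied into the in-memory inode and dirtied.
 */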
static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = d_inode(dentry);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int err;

	if (btrfs_root_readonly(root))
		return -EROFS;

	err = setattr_prepare(dentry, attr);
	if (err)
		return err;

	if (S_ISREG(inode->i_mode) && (attr->ia_valid & ATTR_SIZE)) {
		err = btrfs_setsize(inode, attr);
		if (err)
			return err;
	}

	if (attr->ia_valid) {
		setattr_copy(inode, attr);
		inode_inc_iversion(inode);
		err = btrfs_dirty_inode(inode);

		if (!err && attr->ia_valid & ATTR_MODE)
			err = posix_acl_chmod(inode, inode->i_mode);
	}

	return err;
}
/*
 * While truncating the inode pages during eviction, we get the VFS calling
 * btrfs_invalidatepage() against each page of the inode. This is slow because
 * the calls to btrfs_invalidatepage() result in a huge amount of calls to
 * lock_extent_bits() and clear_extent_bit(), which keep merging and splitting
 * extent_state structures over and over, wasting lots of time.
 *
 * Therefore if the inode is being evicted, let btrfs_invalidatepage() skip all
 * those expensive operations on a per page basis and do only the ordered io
 * finishing, while we release here the extent_map and extent_state structures,
 * without the excessive merging and splitting.
 */
static void evict_inode_truncate_pages(struct inode *inode)
{
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct extent_map_tree *map_tree = &BTRFS_I(inode)->extent_tree;
	struct rb_node *node;

	ASSERT(inode->i_state & I_FREEING);
	truncate_inode_pages_final(&inode->i_data);

	write_lock(&map_tree->lock);
	while (!RB_EMPTY_ROOT(&map_tree->map)) {
		struct extent_map *em;

		node = rb_first(&map_tree->map);
		em = rb_entry(node, struct extent_map, rb_node);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		clear_bit(EXTENT_FLAG_LOGGING, &em->flags);
		remove_extent_mapping(map_tree, em);
		free_extent_map(em);
		if (need_resched()) {
			write_unlock(&map_tree->lock);
			cond_resched();
			write_lock(&map_tree->lock);
		}
	}
	write_unlock(&map_tree->lock);

	/*
	 * Keep looping until we have no more ranges in the io tree.
	 * We can have ongoing bios started by readpages (called from readahead)
	 * that have their endio callback (extent_io.c:end_bio_extent_readpage)
	 * still in progress (unlocked the pages in the bio but did not yet
	 * unlocked the ranges in the io tree). Therefore this means some
	 * ranges can still be locked and eviction started because before
	 * submitting those bios, which are executed by a separate task (work
	 * queue kthread), inode references (inode->i_count) were not taken
	 * (which would be dropped in the end io callback of each bio).
	 * Therefore here we effectively end up waiting for those bios and
	 * anyone else holding locked ranges without having bumped the inode's
	 * reference count - if we don't do it, when they access the inode's
	 * io_tree to unlock a range it may be too late, leading to a
	 * use-after-free issue.
	 */
	spin_lock(&io_tree->lock);
	while (!RB_EMPTY_ROOT(&io_tree->state)) {
		struct extent_state *state;
		struct extent_state *cached_state = NULL;
		u64 start;
		u64 end;

		node = rb_first(&io_tree->state);
		state = rb_entry(node, struct extent_state, rb_node);
		start = state->start;
		end = state->end;
		spin_unlock(&io_tree->lock);

		lock_extent_bits(io_tree, start, end, &cached_state);

		/*
		 * If still has DELALLOC flag, the extent didn't reach disk,
		 * and its reserved space won't be freed by delayed_ref.
		 * So we need to free its reserved space here.
		 * (Refer to comment in btrfs_invalidatepage, case 2)
		 *
		 * Note, end is the bytenr of last byte, so we need + 1 here.
		 */
		if (state->state & EXTENT_DELALLOC)
			btrfs_qgroup_free_data(inode, start, end - start + 1);

		clear_extent_bit(io_tree, start, end,
				 EXTENT_LOCKED | EXTENT_DIRTY |
				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
				 EXTENT_DEFRAG, 1, 1,
				 &cached_state, GFP_NOFS);

		cond_resched();
		spin_lock(&io_tree->lock);
	}
	spin_unlock(&io_tree->lock);
}
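/*
 * ->evict_inode callback.  For a deleted inode this drops all file extent
 * items via btrfs_truncate_inode_items() and removes the orphan item.
 * The reservation loop below tries progressively harder (including
 * stealing from the global reserve, tracked by steal_from_global) before
 * giving up and leaving the truncation to orphan cleanup on next mount.
 */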
void btrfs_evict_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv, *global_rsv;
	int steal_from_global = 0;
	u64 min_size;
	int ret;

	trace_btrfs_inode_evict(inode);

	if (!root) {
		kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
		return;
	}

	min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);

	evict_inode_truncate_pages(inode);

	if (inode->i_nlink &&
	    ((btrfs_root_refs(&root->root_item) != 0 &&
	      root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID) ||
	     btrfs_is_free_space_inode(BTRFS_I(inode))))
		goto no_delete;

	if (is_bad_inode(inode)) {
		btrfs_orphan_del(NULL, BTRFS_I(inode));
		goto no_delete;
	}
	/* do we really want it for ->i_nlink > 0 and zero btrfs_root_refs? */
	if (!special_file(inode->i_mode))
		btrfs_wait_ordered_range(inode, 0, (u64)-1);

	btrfs_free_io_failure_record(BTRFS_I(inode), 0, (u64)-1);

	if (test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags)) {
		BUG_ON(test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
				 &BTRFS_I(inode)->runtime_flags));
		goto no_delete;
	}

	if (inode->i_nlink > 0) {
		BUG_ON(btrfs_root_refs(&root->root_item) != 0 &&
		       root->root_key.objectid != BTRFS_ROOT_TREE_OBJECTID);
		goto no_delete;
	}

	ret = btrfs_commit_inode_delayed_inode(BTRFS_I(inode));
	if (ret) {
		btrfs_orphan_del(NULL, BTRFS_I(inode));
		goto no_delete;
	}

	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		btrfs_orphan_del(NULL, BTRFS_I(inode));
		goto no_delete;
	}
	rsv->size = min_size;
	rsv->failfast = 1;
	global_rsv = &fs_info->global_block_rsv;

	btrfs_i_size_write(BTRFS_I(inode), 0);

	/*
	 * This is a bit simpler than btrfs_truncate since we've already
	 * reserved our space for our orphan item in the unlink, so we just
	 * need to reserve some slack space in case we add bytes and update
	 * inode item when doing the truncate.
	 */
	while (1) {
		ret = btrfs_block_rsv_refill(root, rsv, min_size,
					     BTRFS_RESERVE_FLUSH_LIMIT);

		/*
		 * Try and steal from the global reserve since we will
		 * likely not use this space anyway, we want to try as
		 * hard as possible to get this to work.
		 */
		if (ret)
			steal_from_global++;
		else
			steal_from_global = 0;
		ret = 0;

		/*
		 * steal_from_global == 0: we reserved stuff, hooray!
		 * steal_from_global == 1: we didn't reserve stuff, boo!
		 * steal_from_global == 2: we've committed, still not a lot of
		 * room but maybe we'll have room in the global reserve this
		 * time.
		 * steal_from_global == 3: abandon all hope!
		 */
		if (steal_from_global > 2) {
			btrfs_warn(fs_info,
				   "Could not get space for a delete, will truncate on mount %d",
				   ret);
			btrfs_orphan_del(NULL, BTRFS_I(inode));
			btrfs_free_block_rsv(fs_info, rsv);
			goto no_delete;
		}

		trans = btrfs_join_transaction(root);
		if (IS_ERR(trans)) {
			btrfs_orphan_del(NULL, BTRFS_I(inode));
			btrfs_free_block_rsv(fs_info, rsv);
			goto no_delete;
		}

		/*
		 * We can't just steal from the global reserve, we need to make
		 * sure there is room to do it, if not we need to commit and try
		 * again.
		 */
		if (steal_from_global) {
			if (!btrfs_check_space_for_delayed_refs(trans, fs_info))
				ret = btrfs_block_rsv_migrate(global_rsv, rsv,
							      min_size, 0);
			else
				ret = -ENOSPC;
		}

		/*
		 * Couldn't steal from the global reserve, we have too much
		 * pending stuff built up, commit the transaction and try it
		 * again.
		 */
		if (ret) {
			ret = btrfs_commit_transaction(trans);
			if (ret) {
				btrfs_orphan_del(NULL, BTRFS_I(inode));
				btrfs_free_block_rsv(fs_info, rsv);
				goto no_delete;
			}
			continue;
		} else {
			steal_from_global = 0;
		}

		trans->block_rsv = rsv;

		ret = btrfs_truncate_inode_items(trans, root, inode, 0, 0);
		if (ret != -ENOSPC && ret != -EAGAIN)
			break;

		trans->block_rsv = &fs_info->trans_block_rsv;
		btrfs_end_transaction(trans);
		trans = NULL;
		btrfs_btree_balance_dirty(fs_info);
	}

	btrfs_free_block_rsv(fs_info, rsv);

	/*
	 * Errors here aren't a big deal, it just means we leave orphan items
	 * in the tree.  They will be cleaned up on the next mount.
	 */
	if (ret == 0) {
		trans->block_rsv = root->orphan_block_rsv;
		btrfs_orphan_del(trans, BTRFS_I(inode));
	} else {
		btrfs_orphan_del(NULL, BTRFS_I(inode));
	}

	trans->block_rsv = &fs_info->trans_block_rsv;
	if (!(root == fs_info->tree_root ||
	      root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID))
		btrfs_return_ino(root, btrfs_ino(BTRFS_I(inode)));

	btrfs_end_transaction(trans);
	btrfs_btree_balance_dirty(fs_info);
no_delete:
	btrfs_remove_delayed_node(BTRFS_I(inode));
	clear_inode(inode);
}
/*
 * this returns the key found in the dir entry in the location pointer.
 * If no dir entries were found, location->objectid is 0.
 */
static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
			       struct btrfs_key *location)
{
	const char *name = dentry->d_name.name;
	int namelen = dentry->d_name.len;
	struct btrfs_dir_item *di;
	struct btrfs_path *path;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(BTRFS_I(dir)),
				    name, namelen, 0);
	if (IS_ERR(di))
		ret = PTR_ERR(di);

	if (IS_ERR_OR_NULL(di))
		goto out_err;

	btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
out:
	btrfs_free_path(path);
	return ret;
out_err:
	location->objectid = 0;
	goto out;
}
/*
 * when we hit a tree root in a directory, the btrfs part of the inode
 * needs to be changed to reflect the root directory of the tree root.  This
 * is kind of like crossing a mount point.
 */
static int fixup_tree_root_location(struct btrfs_fs_info *fs_info,
				    struct inode *dir,
				    struct dentry *dentry,
				    struct btrfs_key *location,
				    struct btrfs_root **sub_root)
{
	struct btrfs_path *path;
	struct btrfs_root *new_root;
	struct btrfs_root_ref *ref;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out;
	}

	err = -ENOENT;
	key.objectid = BTRFS_I(dir)->root->root_key.objectid;
	key.type = BTRFS_ROOT_REF_KEY;
	key.offset = location->objectid;

	ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
	if (ret) {
		if (ret < 0)
			err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref);
	if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(BTRFS_I(dir)) ||
	    btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len)
		goto out;

	ret = memcmp_extent_buffer(leaf, dentry->d_name.name,
				   (unsigned long)(ref + 1),
				   dentry->d_name.len);
	if (ret)
		goto out;

	btrfs_release_path(path);

	new_root = btrfs_read_fs_root_no_name(fs_info, location);
	if (IS_ERR(new_root)) {
		err = PTR_ERR(new_root);
		goto out;
	}

	*sub_root = new_root;
	location->objectid = btrfs_root_dirid(&new_root->root_item);
	location->type = BTRFS_INODE_ITEM_KEY;
	location->offset = 0;
	err = 0;
out:
	btrfs_free_path(path);
	return err;
}
static void inode_tree_add(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_inode *entry;
	struct rb_node **p;
	struct rb_node *parent;
	struct rb_node *new = &BTRFS_I(inode)->rb_node;
	u64 ino = btrfs_ino(BTRFS_I(inode));

	if (inode_unhashed(inode))
		return;
	parent = NULL;
	spin_lock(&root->inode_lock);
	p = &root->inode_tree.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct btrfs_inode, rb_node);

		if (ino < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
			p = &parent->rb_left;
		else if (ino > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
			p = &parent->rb_right;
		else {
			WARN_ON(!(entry->vfs_inode.i_state &
				  (I_WILL_FREE | I_FREEING)));
			rb_replace_node(parent, new, &root->inode_tree);
			RB_CLEAR_NODE(parent);
			spin_unlock(&root->inode_lock);
			return;
		}
	}
	rb_link_node(new, parent, p);
	rb_insert_color(new, &root->inode_tree);
	spin_unlock(&root->inode_lock);
}
static void inode_tree_del(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int empty = 0;

	spin_lock(&root->inode_lock);
	if (!RB_EMPTY_NODE(&BTRFS_I(inode)->rb_node)) {
		rb_erase(&BTRFS_I(inode)->rb_node, &root->inode_tree);
		RB_CLEAR_NODE(&BTRFS_I(inode)->rb_node);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
	}
	spin_unlock(&root->inode_lock);

	if (empty && btrfs_root_refs(&root->root_item) == 0) {
		synchronize_srcu(&fs_info->subvol_srcu);
		spin_lock(&root->inode_lock);
		empty = RB_EMPTY_ROOT(&root->inode_tree);
		spin_unlock(&root->inode_lock);
		if (empty)
			btrfs_add_dead_root(root);
	}
}
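/*
 * Walk root->inode_tree in objectid order and drop every inode that is
 * only referenced by the cache; used when a dead root is cleaned up.
 */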
void btrfs_invalidate_inodes(struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct rb_node *node;
	struct rb_node *prev;
	struct btrfs_inode *entry;
	struct inode *inode;
	u64 objectid = 0;

	if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		WARN_ON(btrfs_root_refs(&root->root_item) != 0);

	spin_lock(&root->inode_lock);
again:
	node = root->inode_tree.rb_node;
	prev = NULL;
	while (node) {
		prev = node;
		entry = rb_entry(node, struct btrfs_inode, rb_node);

		if (objectid < btrfs_ino(BTRFS_I(&entry->vfs_inode)))
			node = node->rb_left;
		else if (objectid > btrfs_ino(BTRFS_I(&entry->vfs_inode)))
			node = node->rb_right;
		else
			break;
	}
	if (!node) {
		while (prev) {
			entry = rb_entry(prev, struct btrfs_inode, rb_node);
			if (objectid <= btrfs_ino(BTRFS_I(&entry->vfs_inode))) {
				node = prev;
				break;
			}
			prev = rb_next(prev);
		}
	}
	while (node) {
		entry = rb_entry(node, struct btrfs_inode, rb_node);
		objectid = btrfs_ino(BTRFS_I(&entry->vfs_inode)) + 1;
		inode = igrab(&entry->vfs_inode);
		if (inode) {
			spin_unlock(&root->inode_lock);
			if (atomic_read(&inode->i_count) > 1)
				d_prune_aliases(inode);
			/*
			 * btrfs_drop_inode will have it removed from
			 * the inode cache when its usage count
			 * hits zero.
			 */
			iput(inode);
			cond_resched();
			spin_lock(&root->inode_lock);
			goto again;
		}

		if (cond_resched_lock(&root->inode_lock))
			goto again;

		node = rb_next(node);
	}
	spin_unlock(&root->inode_lock);
}
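/*
 * The next three helpers implement the iget5_locked() protocol: inodes
 * are hashed by (location->objectid, root), btrfs_find_actor() compares
 * both, and btrfs_init_locked_inode() seeds a freshly allocated inode.
 */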
static int btrfs_init_locked_inode(struct inode *inode, void *p)
{
	struct btrfs_iget_args *args = p;
	inode->i_ino = args->location->objectid;
	memcpy(&BTRFS_I(inode)->location, args->location,
	       sizeof(*args->location));
	BTRFS_I(inode)->root = args->root;
	return 0;
}

static int btrfs_find_actor(struct inode *inode, void *opaque)
{
	struct btrfs_iget_args *args = opaque;
	return args->location->objectid == BTRFS_I(inode)->location.objectid &&
		args->root == BTRFS_I(inode)->root;
}

static struct inode *btrfs_iget_locked(struct super_block *s,
				       struct btrfs_key *location,
				       struct btrfs_root *root)
{
	struct inode *inode;
	struct btrfs_iget_args args;
	unsigned long hashval = btrfs_inode_hash(location->objectid, root);

	args.location = location;
	args.root = root;

	inode = iget5_locked(s, hashval, btrfs_find_actor,
			     btrfs_init_locked_inode,
			     (void *)&args);
	return inode;
}
/* Get an inode object given its location and corresponding root.
 * Returns in *is_new if the inode was read from disk
 */
struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
			 struct btrfs_root *root, int *new)
{
	struct inode *inode;

	inode = btrfs_iget_locked(s, location, root);
	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (inode->i_state & I_NEW) {
		int ret;

		ret = btrfs_read_locked_inode(inode);
		if (!is_bad_inode(inode)) {
			inode_tree_add(inode);
			unlock_new_inode(inode);
			if (new)
				*new = 1;
		} else {
			unlock_new_inode(inode);
			iput(inode);
			inode = ERR_PTR(ret < 0 ? ret : -ESTALE);
		}
	}

	return inode;
}
static struct inode *new_simple_dir(struct super_block *s,
				    struct btrfs_key *key,
				    struct btrfs_root *root)
{
	struct inode *inode = new_inode(s);

	if (!inode)
		return ERR_PTR(-ENOMEM);

	BTRFS_I(inode)->root = root;
	memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
	set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);

	inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
	inode->i_op = &btrfs_dir_ro_inode_operations;
	inode->i_opflags &= ~IOP_XATTR;
	inode->i_fop = &simple_dir_operations;
	inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	return inode;
}
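/*
 * Core of directory lookup.  A BTRFS_ROOT_ITEM_KEY location means the
 * entry is a subvolume: fixup_tree_root_location() redirects to the
 * subvolume root, and if no root ref exists the entry degrades to a
 * read-only dummy directory from new_simple_dir().
 */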
struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_root *sub_root = root;
	struct btrfs_key location;
	int index;
	int ret = 0;

	if (dentry->d_name.len > BTRFS_NAME_LEN)
		return ERR_PTR(-ENAMETOOLONG);

	ret = btrfs_inode_by_name(dir, dentry, &location);
	if (ret < 0)
		return ERR_PTR(ret);

	if (location.objectid == 0)
		return ERR_PTR(-ENOENT);

	if (location.type == BTRFS_INODE_ITEM_KEY) {
		inode = btrfs_iget(dir->i_sb, &location, root, NULL);
		return inode;
	}

	BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);

	index = srcu_read_lock(&fs_info->subvol_srcu);
	ret = fixup_tree_root_location(fs_info, dir, dentry,
				       &location, &sub_root);
	if (ret < 0) {
		if (ret != -ENOENT)
			inode = ERR_PTR(ret);
		else
			inode = new_simple_dir(dir->i_sb, &location, sub_root);
	} else {
		inode = btrfs_iget(dir->i_sb, &location, sub_root, NULL);
	}
	srcu_read_unlock(&fs_info->subvol_srcu, index);

	if (!IS_ERR(inode) && root != sub_root) {
		down_read(&fs_info->cleanup_work_sem);
		if (!(inode->i_sb->s_flags & MS_RDONLY))
			ret = btrfs_orphan_cleanup(sub_root);
		up_read(&fs_info->cleanup_work_sem);
		if (ret) {
			iput(inode);
			inode = ERR_PTR(ret);
		}
	}

	return inode;
}
static int btrfs_dentry_delete(const struct dentry *dentry)
{
	struct btrfs_root *root;
	struct inode *inode = d_inode(dentry);

	if (!inode && !IS_ROOT(dentry))
		inode = d_inode(dentry->d_parent);

	if (inode) {
		root = BTRFS_I(inode)->root;
		if (btrfs_root_refs(&root->root_item) == 0)
			return 1;

		if (btrfs_ino(BTRFS_I(inode)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)
			return 1;
	}
	return 0;
}

static void btrfs_dentry_release(struct dentry *dentry)
{
	kfree(dentry->d_fsdata);
}

static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
				   unsigned int flags)
{
	struct inode *inode;

	inode = btrfs_lookup_dentry(dir, dentry);
	if (IS_ERR(inode)) {
		if (PTR_ERR(inode) == -ENOENT)
			inode = NULL;
		else
			return ERR_CAST(inode);
	}

	return d_splice_alias(inode, dentry);
}
unsigned char btrfs_filetype_table[] = {
	DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
};
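/*
 * readdir callback.  Entries come from two sources that are merged in
 * index order: DIR_INDEX items found in the btree and not yet committed
 * delayed items (ins_list/del_list).
 */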
static int btrfs_real_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_item *item;
	struct btrfs_dir_item *di;
	struct btrfs_key key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct list_head ins_list;
	struct list_head del_list;
	int ret;
	struct extent_buffer *leaf;
	int slot;
	unsigned char d_type;
	int over = 0;
	char tmp_name[32];
	char *name_ptr;
	int name_len;
	bool put = false;
	struct btrfs_key location;

	if (!dir_emit_dots(file, ctx))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = READA_FORWARD;

	INIT_LIST_HEAD(&ins_list);
	INIT_LIST_HEAD(&del_list);
	put = btrfs_readdir_get_delayed_items(inode, &ins_list, &del_list);

	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = ctx->pos;
	key.objectid = btrfs_ino(BTRFS_I(inode));

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	while (1) {
		leaf = path->nodes[0];
		slot = path->slots[0];
		if (slot >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto err;
			else if (ret > 0)
				break;
			continue;
		}

		item = btrfs_item_nr(slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		if (found_key.objectid != key.objectid)
			break;
		if (found_key.type != BTRFS_DIR_INDEX_KEY)
			break;
		if (found_key.offset < ctx->pos)
			goto next;
		if (btrfs_should_delete_dir_index(&del_list, found_key.offset))
			goto next;

		ctx->pos = found_key.offset;

		di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item);
		if (verify_dir_item(fs_info, leaf, di))
			goto next;

		name_len = btrfs_dir_name_len(leaf, di);
		if (name_len <= sizeof(tmp_name)) {
			name_ptr = tmp_name;
		} else {
			name_ptr = kmalloc(name_len, GFP_KERNEL);
			if (!name_ptr) {
				ret = -ENOMEM;
				goto err;
			}
		}
		read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1),
				   name_len);

		d_type = btrfs_filetype_table[btrfs_dir_type(leaf, di)];
		btrfs_dir_item_key_to_cpu(leaf, di, &location);

		over = !dir_emit(ctx, name_ptr, name_len, location.objectid,
				 d_type);

		if (name_ptr != tmp_name)
			kfree(name_ptr);

		if (over)
			goto nopos;
		ctx->pos++;
next:
		path->slots[0]++;
	}

	ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
	if (ret)
		goto nopos;

	/*
	 * Stop new entries from being returned after we return the last
	 * entry.
	 *
	 * New directory entries are assigned a strictly increasing
	 * offset.  This means that new entries created during readdir
	 * are *guaranteed* to be seen in the future by that readdir.
	 * This has broken buggy programs which operate on names as
	 * they're returned by readdir.  Until we re-use freed offsets
	 * we have this hack to stop new entries from being returned
	 * under the assumption that they'll never reach this huge
	 * offset.
	 *
	 * This is being careful not to overflow 32bit loff_t unless the
	 * last entry requires it because doing so has broken 32bit apps
	 * in the past.
	 */
	if (ctx->pos >= INT_MAX)
		ctx->pos = LLONG_MAX;
	else
		ctx->pos = INT_MAX;
nopos:
	ret = 0;
err:
	if (put)
		btrfs_readdir_put_delayed_items(inode, &ins_list, &del_list);
	btrfs_free_path(path);
	return ret;
}
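/*
 * ->write_inode callback.  btrfs metadata is persisted by transaction
 * commit, so for WB_SYNC_ALL this joins and commits the running
 * transaction instead of writing the inode item directly.
 */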
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret = 0;
	bool nolock = false;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (btrfs_fs_closing(root->fs_info) &&
	    btrfs_is_free_space_inode(BTRFS_I(inode)))
		nolock = true;

	if (wbc->sync_mode == WB_SYNC_ALL) {
		if (nolock)
			trans = btrfs_join_transaction_nolock(root);
		else
			trans = btrfs_join_transaction(root);
		if (IS_ERR(trans))
			return PTR_ERR(trans);
		ret = btrfs_commit_transaction(trans);
	}
	return ret;
}
/*
 * This is somewhat expensive, updating the tree every time the
 * inode changes.  But, it is most likely to find the inode in cache.
 * FIXME, needs more benchmarking...there are no reasons other than performance
 * to keep or drop this code.
 */
static int btrfs_dirty_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_trans_handle *trans;
	int ret;

	if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
		return 0;

	trans = btrfs_join_transaction(root);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_update_inode(trans, root, inode);
	if (ret && ret == -ENOSPC) {
		/* whoops, lets try again with the full transaction */
		btrfs_end_transaction(trans);
		trans = btrfs_start_transaction(root, 1);
		if (IS_ERR(trans))
			return PTR_ERR(trans);

		ret = btrfs_update_inode(trans, root, inode);
	}
	btrfs_end_transaction(trans);
	if (BTRFS_I(inode)->delayed_node)
		btrfs_balance_delayed_items(fs_info);

	return ret;
}
/*
 * This is a copy of file_update_time.  We need this so we can return error on
 * ENOSPC for updating the inode in the case of file write and mmap writes.
 */
static int btrfs_update_time(struct inode *inode, struct timespec *now,
			     int flags)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (btrfs_root_readonly(root))
		return -EROFS;

	if (flags & S_VERSION)
		inode_inc_iversion(inode);
	if (flags & S_CTIME)
		inode->i_ctime = *now;
	if (flags & S_MTIME)
		inode->i_mtime = *now;
	if (flags & S_ATIME)
		inode->i_atime = *now;
	return btrfs_dirty_inode(inode);
}
/*
 * find the highest existing sequence number in a directory
 * and then set the in-memory index_cnt variable to reflect
 * free sequence numbers
 */
static int btrfs_set_inode_index_count(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_key key, found_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_DIR_INDEX_KEY;
	key.offset = (u64)-1;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;
	/* FIXME: we should be able to handle this */
	if (ret == 0)
		goto out;
	ret = 0;

	/*
	 * MAGIC NUMBER EXPLANATION:
	 * since we search a directory based on f_pos we have to start at 2
	 * since '.' and '..' have f_pos of 0 and 1 respectively, so everybody
	 * else has to start at 2
	 */
	if (path->slots[0] == 0) {
		inode->index_cnt = 2;
		goto out;
	}

	path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);

	if (found_key.objectid != btrfs_ino(inode) ||
	    found_key.type != BTRFS_DIR_INDEX_KEY) {
		inode->index_cnt = 2;
		goto out;
	}

	inode->index_cnt = found_key.offset + 1;
out:
	btrfs_free_path(path);
	return ret;
}
/*
 * helper to find a free sequence number in a given directory.  This current
 * code is very simple, later versions will do smarter things in the btree
 */
int btrfs_set_inode_index(struct btrfs_inode *dir, u64 *index)
{
	int ret = 0;

	if (dir->index_cnt == (u64)-1) {
		ret = btrfs_inode_delayed_dir_index_count(dir);
		if (ret) {
			ret = btrfs_set_inode_index_count(dir);
			if (ret)
				return ret;
		}
	}

	*index = dir->index_cnt;
	dir->index_cnt++;

	return ret;
}
static int btrfs_insert_inode_locked(struct inode *inode)
{
	struct btrfs_iget_args args;
	args.location = &BTRFS_I(inode)->location;
	args.root = BTRFS_I(inode)->root;

	return insert_inode_locked4(inode,
		   btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root),
		   btrfs_find_actor, &args);
}
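/*
 * Create a new in-memory inode and insert its INODE_ITEM (plus, when a
 * name is given, the first INODE_REF) into the tree with a single
 * btrfs_insert_empty_items() call.  A NULL name means an O_TMPFILE style
 * inode: nlink starts at 0 and no directory entry is created.
 */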
static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     const char *name, int name_len,
				     u64 ref_objectid, u64 objectid,
				     umode_t mode, u64 *index)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct inode *inode;
	struct btrfs_inode_item *inode_item;
	struct btrfs_key *location;
	struct btrfs_path *path;
	struct btrfs_inode_ref *ref;
	struct btrfs_key key[2];
	u32 sizes[2];
	int nitems = name ? 2 : 1;
	unsigned long ptr;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return ERR_PTR(-ENOMEM);

	inode = new_inode(fs_info->sb);
	if (!inode) {
		btrfs_free_path(path);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * O_TMPFILE, set link count to 0, so that after this point,
	 * we fill in an inode item with the correct link count.
	 */
	if (!name)
		set_nlink(inode, 0);

	/*
	 * we have to initialize this early, so we can reclaim the inode
	 * number if we fail afterwards in this function.
	 */
	inode->i_ino = objectid;

	if (dir && name) {
		trace_btrfs_inode_request(dir);

		ret = btrfs_set_inode_index(BTRFS_I(dir), index);
		if (ret) {
			btrfs_free_path(path);
			iput(inode);
			return ERR_PTR(ret);
		}
	} else if (dir) {
		*index = 0;
	}
	/*
	 * index_cnt is ignored for everything but a dir,
	 * btrfs_get_inode_index_count has an explanation for the magic
	 * number
	 */
	BTRFS_I(inode)->index_cnt = 2;
	BTRFS_I(inode)->dir_index = *index;
	BTRFS_I(inode)->root = root;
	BTRFS_I(inode)->generation = trans->transid;
	inode->i_generation = BTRFS_I(inode)->generation;

	/*
	 * We could have gotten an inode number from somebody who was fsynced
	 * and then removed in this same transaction, so let's just set full
	 * sync since it will be a full sync anyway and this will blow away the
	 * old info in the log.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);

	key[0].objectid = objectid;
	key[0].type = BTRFS_INODE_ITEM_KEY;
	key[0].offset = 0;

	sizes[0] = sizeof(struct btrfs_inode_item);

	if (name) {
		/*
		 * Start new inodes with an inode_ref. This is slightly more
		 * efficient for small numbers of hard links since they will
		 * be packed into one item. Extended refs will kick in if we
		 * add more hard links than can fit in the ref item.
		 */
		key[1].objectid = objectid;
		key[1].type = BTRFS_INODE_REF_KEY;
		key[1].offset = ref_objectid;

		sizes[1] = name_len + sizeof(*ref);
	}

	location = &BTRFS_I(inode)->location;
	location->objectid = objectid;
	location->offset = 0;
	location->type = BTRFS_INODE_ITEM_KEY;

	ret = btrfs_insert_inode_locked(inode);
	if (ret < 0)
		goto fail;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_items(trans, root, path, key, sizes, nitems);
	if (ret != 0)
		goto fail_unlock;

	inode_init_owner(inode, dir, mode);
	inode_set_bytes(inode, 0);

	inode->i_mtime = current_time(inode);
	inode->i_atime = inode->i_mtime;
	inode->i_ctime = inode->i_mtime;
	BTRFS_I(inode)->i_otime = inode->i_mtime;

	inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				  struct btrfs_inode_item);
	memzero_extent_buffer(path->nodes[0], (unsigned long)inode_item,
			     sizeof(*inode_item));
	fill_inode_item(trans, path->nodes[0], inode_item, inode);

	if (name) {
		ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
				     struct btrfs_inode_ref);
		btrfs_set_inode_ref_name_len(path->nodes[0], ref, name_len);
		btrfs_set_inode_ref_index(path->nodes[0], ref, *index);
		ptr = (unsigned long)(ref + 1);
		write_extent_buffer(path->nodes[0], name, ptr, name_len);
	}

	btrfs_mark_buffer_dirty(path->nodes[0]);
	btrfs_free_path(path);

	btrfs_inherit_iflags(inode, dir);

	if (S_ISREG(mode)) {
		if (btrfs_test_opt(fs_info, NODATASUM))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM;
		if (btrfs_test_opt(fs_info, NODATACOW))
			BTRFS_I(inode)->flags |= BTRFS_INODE_NODATACOW |
				BTRFS_INODE_NODATASUM;
	}

	inode_tree_add(inode);

	trace_btrfs_inode_new(inode);
	btrfs_set_inode_last_trans(trans, inode);

	btrfs_update_root_times(trans, root);

	ret = btrfs_inode_inherit_props(trans, inode, dir);
	if (ret)
		btrfs_err(fs_info,
			  "error inheriting props for ino %llu (root %llu): %d",
			  btrfs_ino(BTRFS_I(inode)), root->root_key.objectid,
			  ret);

	return inode;

fail_unlock:
	unlock_new_inode(inode);
fail:
	if (dir && name)
		BTRFS_I(dir)->index_cnt--;
	btrfs_free_path(path);
	iput(inode);
	return ERR_PTR(ret);
}
static inline u8 btrfs_inode_type(struct inode *inode)
{
	return btrfs_type_by_mode[(inode->i_mode & S_IFMT) >> S_SHIFT];
}
/*
 * utility function to add 'inode' into 'parent_inode' with
 * a given name and a given sequence number.
 * if 'add_backref' is true, also insert a backref from the
 * inode to the parent directory.
 */
int btrfs_add_link(struct btrfs_trans_handle *trans,
		   struct btrfs_inode *parent_inode, struct btrfs_inode *inode,
		   const char *name, int name_len, int add_backref, u64 index)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret = 0;
	struct btrfs_key key;
	struct btrfs_root *root = parent_inode->root;
	u64 ino = btrfs_ino(inode);
	u64 parent_ino = btrfs_ino(parent_inode);

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		memcpy(&key, &inode->root->root_key, sizeof(key));
	} else {
		key.objectid = ino;
		key.type = BTRFS_INODE_ITEM_KEY;
		key.offset = 0;
	}

	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		ret = btrfs_add_root_ref(trans, fs_info, key.objectid,
					 root->root_key.objectid, parent_ino,
					 index, name, name_len);
	} else if (add_backref) {
		ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino,
					     parent_ino, index);
	}

	/* Nothing to clean up yet */
	if (ret)
		return ret;

	ret = btrfs_insert_dir_item(trans, root, name, name_len,
				    parent_inode, &key,
				    btrfs_inode_type(&inode->vfs_inode), index);
	if (ret == -EEXIST || ret == -EOVERFLOW)
		goto fail_dir_item;
	else if (ret) {
		btrfs_abort_transaction(trans, ret);
		return ret;
	}

	btrfs_i_size_write(parent_inode, parent_inode->vfs_inode.i_size +
			   name_len * 2);
	inode_inc_iversion(&parent_inode->vfs_inode);
	parent_inode->vfs_inode.i_mtime = parent_inode->vfs_inode.i_ctime =
		current_time(&parent_inode->vfs_inode);
	ret = btrfs_update_inode(trans, root, &parent_inode->vfs_inode);
	if (ret)
		btrfs_abort_transaction(trans, ret);
	return ret;

fail_dir_item:
	if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) {
		u64 local_index;
		int err;
		err = btrfs_del_root_ref(trans, fs_info, key.objectid,
					 root->root_key.objectid, parent_ino,
					 &local_index, name, name_len);

	} else if (add_backref) {
		u64 local_index;
		int err;

		err = btrfs_del_inode_ref(trans, root, name, name_len,
					  ino, parent_ino, &local_index);
	}
	return ret;
}
static int btrfs_add_nondir(struct btrfs_trans_handle *trans,
			    struct btrfs_inode *dir, struct dentry *dentry,
			    struct btrfs_inode *inode, int backref, u64 index)
{
	int err = btrfs_add_link(trans, dir, inode,
				 dentry->d_name.name, dentry->d_name.len,
				 backref, index);
	if (err > 0)
		err = -EEXIST;
	return err;
}
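/*
 * The directory entry creating operations below (mknod, create, link,
 * mkdir) follow one pattern: reserve five transaction items up front,
 * pick a free objectid, build the inode with btrfs_new_inode() and wire
 * it into the directory with btrfs_add_nondir()/btrfs_add_link().
 */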
static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
			umode_t mode, dev_t rdev)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode, rdev);

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock_inode;

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			0, index);
	if (err) {
		goto out_unlock_inode;
	} else {
		btrfs_update_inode(trans, root, inode);
		unlock_new_inode(inode);
		d_instantiate(dentry, inode);
	}

out_unlock:
	btrfs_end_transaction(trans);
	btrfs_balance_delayed_items(fs_info);
	btrfs_btree_balance_dirty(fs_info);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	return err;

out_unlock_inode:
	drop_inode = 1;
	unlock_new_inode(inode);
	goto out_unlock;
}
static int btrfs_create(struct inode *dir, struct dentry *dentry,
			umode_t mode, bool excl)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	int drop_inode_on_err = 0;
	int err;
	u64 objectid;
	u64 index = 0;

	/*
	 * 2 for inode item and ref
	 * 2 for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}
	drop_inode_on_err = 1;
	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock_inode;

	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_unlock_inode;

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			0, index);
	if (err)
		goto out_unlock_inode;

	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
	unlock_new_inode(inode);
	d_instantiate(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans);
	if (err && drop_inode_on_err) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_balance_delayed_items(fs_info);
	btrfs_btree_balance_dirty(fs_info);
	return err;

out_unlock_inode:
	unlock_new_inode(inode);
	goto out_unlock;
}
static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
		      struct dentry *dentry)
{
	struct btrfs_trans_handle *trans = NULL;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = d_inode(old_dentry);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	u64 index;
	int err;
	int drop_inode = 0;

	/* do not allow sys_link's with other subvols of the same device */
	if (root->objectid != BTRFS_I(inode)->root->objectid)
		return -EXDEV;

	if (inode->i_nlink >= BTRFS_LINK_MAX)
		return -EMLINK;

	err = btrfs_set_inode_index(BTRFS_I(dir), &index);
	if (err)
		goto fail;

	/*
	 * 2 items for inode and inode ref
	 * 2 items for dir items
	 * 1 item for parent inode
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		trans = NULL;
		goto fail;
	}

	/* There are several dir indexes for this inode, clear the cache. */
	BTRFS_I(inode)->dir_index = 0ULL;
	inc_nlink(inode);
	inode_inc_iversion(inode);
	inode->i_ctime = current_time(inode);
	ihold(inode);
	set_bit(BTRFS_INODE_COPY_EVERYTHING, &BTRFS_I(inode)->runtime_flags);

	err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry, BTRFS_I(inode),
			1, index);

	if (err) {
		drop_inode = 1;
	} else {
		struct dentry *parent = dentry->d_parent;
		err = btrfs_update_inode(trans, root, inode);
		if (err)
			goto fail;
		if (inode->i_nlink == 1) {
			/*
			 * If new hard link count is 1, it's a file created
			 * with open(2) O_TMPFILE flag.
			 */
			err = btrfs_orphan_del(trans, BTRFS_I(inode));
			if (err)
				goto fail;
		}
		d_instantiate(dentry, inode);
		btrfs_log_new_name(trans, BTRFS_I(inode), NULL, parent);
	}

	btrfs_balance_delayed_items(fs_info);
fail:
	if (trans)
		btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;
}
static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct inode *inode = NULL;
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	int err = 0;
	int drop_on_err = 0;
	u64 objectid = 0;
	u64 index = 0;

	/*
	 * 2 items for inode and ref
	 * 2 items for dir items
	 * 1 for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_fail;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
			dentry->d_name.len, btrfs_ino(BTRFS_I(dir)), objectid,
			S_IFDIR | mode, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_fail;
	}

	drop_on_err = 1;
	/* these must be set before we unlock the inode */
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_fail_inode;

	btrfs_i_size_write(BTRFS_I(inode), 0);
	err = btrfs_update_inode(trans, root, inode);
	if (err)
		goto out_fail_inode;

	err = btrfs_add_link(trans, BTRFS_I(dir), BTRFS_I(inode),
			dentry->d_name.name,
			dentry->d_name.len, 0, index);
	if (err)
		goto out_fail_inode;

	d_instantiate(dentry, inode);
	/*
	 * mkdir is special.  We're unlocking after we call d_instantiate
	 * to avoid a race with nfsd calling d_instantiate.
	 */
	unlock_new_inode(inode);
	drop_on_err = 0;

out_fail:
	btrfs_end_transaction(trans);
	if (drop_on_err) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_balance_delayed_items(fs_info);
	btrfs_btree_balance_dirty(fs_info);
	return err;

out_fail_inode:
	unlock_new_inode(inode);
	goto out_fail;
}
/* Find next extent map of a given extent map, caller needs to ensure locks */
static struct extent_map *next_extent_map(struct extent_map *em)
{
	struct rb_node *next;

	next = rb_next(&em->rb_node);
	if (!next)
		return NULL;
	return container_of(next, struct extent_map, rb_node);
}

static struct extent_map *prev_extent_map(struct extent_map *em)
{
	struct rb_node *prev;

	prev = rb_prev(&em->rb_node);
	if (!prev)
		return NULL;
	return container_of(prev, struct extent_map, rb_node);
}
/* helper for btrfs_get_extent.  Given an existing extent in the tree,
 * the existing extent is the nearest extent to map_start,
 * and an extent that you want to insert, deal with overlap and insert
 * the best fitted new extent into the tree.
 */
static int merge_extent_mapping(struct extent_map_tree *em_tree,
				struct extent_map *existing,
				struct extent_map *em,
				u64 map_start)
{
	struct extent_map *prev;
	struct extent_map *next;
	u64 start;
	u64 end;
	u64 start_diff;

	BUG_ON(map_start < em->start || map_start >= extent_map_end(em));

	if (existing->start > map_start) {
		next = existing;
		prev = prev_extent_map(next);
	} else {
		prev = existing;
		next = next_extent_map(prev);
	}

	start = prev ? extent_map_end(prev) : em->start;
	start = max_t(u64, start, em->start);
	end = next ? next->start : extent_map_end(em);
	end = min_t(u64, end, extent_map_end(em));
	start_diff = start - em->start;
	em->start = start;
	em->len = end - start;
	if (em->block_start < EXTENT_MAP_LAST_BYTE &&
	    !test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) {
		em->block_start += start_diff;
		em->block_len -= start_diff;
	}
	return add_extent_mapping(em_tree, em, 0);
}
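/*
 * Decompress an inline extent into @page.  The compressed bytes are
 * copied out of the leaf into a temporary buffer first because
 * btrfs_decompress() takes a plain memory buffer, not an extent buffer.
 */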
static noinline int uncompress_inline(struct btrfs_path *path,
				      struct page *page,
				      size_t pg_offset, u64 extent_offset,
				      struct btrfs_file_extent_item *item)
{
	int ret;
	struct extent_buffer *leaf = path->nodes[0];
	char *tmp;
	size_t max_size;
	unsigned long inline_size;
	unsigned long ptr;
	int compress_type;

	WARN_ON(pg_offset != 0);
	compress_type = btrfs_file_extent_compression(leaf, item);
	max_size = btrfs_file_extent_ram_bytes(leaf, item);
	inline_size = btrfs_file_extent_inline_item_len(leaf,
					btrfs_item_nr(path->slots[0]));
	tmp = kmalloc(inline_size, GFP_NOFS);
	if (!tmp)
		return -ENOMEM;
	ptr = btrfs_file_extent_inline_start(item);

	read_extent_buffer(leaf, tmp, ptr, inline_size);

	max_size = min_t(unsigned long, PAGE_SIZE, max_size);
	ret = btrfs_decompress(compress_type, tmp, page,
			       extent_offset, inline_size, max_size);

	/*
	 * decompression code contains a memset to fill in any space between the end
	 * of the uncompressed data and the end of max_size in case the decompressed
	 * data ends up shorter than ram_bytes.  That doesn't cover the hole between
	 * the end of an inline extent and the beginning of the next block, so we
	 * cover that region here.
	 */

	if (max_size + pg_offset < PAGE_SIZE) {
		char *map = kmap(page);
		memset(map + pg_offset + max_size, 0,
		       PAGE_SIZE - max_size - pg_offset);
		kunmap(page);
	}
	kfree(tmp);
	return ret;
}
/*
 * a bit scary, this does extent mapping from logical file offset to the disk.
 * the ugly parts come from merging extents from the disk with the in-ram
 * representation.  This gets more complex because of the data=ordered code,
 * where the in-ram extents might be locked pending data=ordered completion.
 *
 * This also copies inline extents directly into the page.
 */
struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
		struct page *page,
		size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	int ret;
	int err = 0;
	u64 extent_start = 0;
	u64 extent_end = 0;
	u64 objectid = btrfs_ino(inode);
	u32 found_type;
	struct btrfs_path *path = NULL;
	struct btrfs_root *root = inode->root;
	struct btrfs_file_extent_item *item;
	struct extent_buffer *leaf;
	struct btrfs_key found_key;
	struct extent_map *em = NULL;
	struct extent_map_tree *em_tree = &inode->extent_tree;
	struct extent_io_tree *io_tree = &inode->io_tree;
	struct btrfs_trans_handle *trans = NULL;
	const bool new_inline = !page || create;

again:
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree, start, len);
	if (em)
		em->bdev = fs_info->fs_devices->latest_bdev;
	read_unlock(&em_tree->lock);

	if (em) {
		if (em->start > start || em->start + em->len <= start)
			free_extent_map(em);
		else if (em->block_start == EXTENT_MAP_INLINE && page)
			free_extent_map(em);
		else
			goto out;
	}
	em = alloc_extent_map();
	if (!em) {
		err = -ENOMEM;
		goto out;
	}
	em->bdev = fs_info->fs_devices->latest_bdev;
	em->start = EXTENT_MAP_HOLE;
	em->orig_start = EXTENT_MAP_HOLE;
	em->len = (u64)-1;
	em->block_len = (u64)-1;

	if (!path) {
		path = btrfs_alloc_path();
		if (!path) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * Chances are we'll be called again, so go ahead and do
		 * readahead
		 */
		path->reada = READA_FORWARD;
	}

	ret = btrfs_lookup_file_extent(trans, root, path,
				       objectid, start, trans != NULL);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	if (ret != 0) {
		if (path->slots[0] == 0)
			goto not_found;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	/* are we inside the extent that was found? */
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	found_type = found_key.type;
	if (found_key.objectid != objectid ||
	    found_type != BTRFS_EXTENT_DATA_KEY) {
		/*
		 * If we backup past the first extent we want to move forward
		 * and see if there is an extent in front of us, otherwise we'll
		 * say there is a hole for our whole search range which can
		 * cause problems.
		 */
		extent_end = start;
		goto next;
	}

	found_type = btrfs_file_extent_type(leaf, item);
	extent_start = found_key.offset;
	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
		       btrfs_file_extent_num_bytes(leaf, item);
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;

		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}
next:
	if (start >= extent_end) {
		path->slots[0]++;
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				err = ret;
				goto out;
			}
			if (ret > 0)
				goto not_found;
			leaf = path->nodes[0];
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid ||
		    found_key.type != BTRFS_EXTENT_DATA_KEY)
			goto not_found;
		if (start + len <= found_key.offset)
			goto not_found;
		if (start > found_key.offset)
			goto next;
		em->start = start;
		em->orig_start = start;
		em->len = found_key.offset - start;
		goto not_found_em;
	}

	btrfs_extent_item_to_extent_map(inode, path, item,
			new_inline, em);

	if (found_type == BTRFS_FILE_EXTENT_REG ||
	    found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		goto insert;
	} else if (found_type == BTRFS_FILE_EXTENT_INLINE) {
		unsigned long ptr;
		char *map;
		size_t size;
		size_t extent_offset;
		size_t copy_size;

		if (new_inline)
			goto out;

		size = btrfs_file_extent_inline_len(leaf, path->slots[0], item);
		extent_offset = page_offset(page) + pg_offset - extent_start;
		copy_size = min_t(u64, PAGE_SIZE - pg_offset,
				  size - extent_offset);
		em->start = extent_start + extent_offset;
		em->len = ALIGN(copy_size, fs_info->sectorsize);
		em->orig_block_len = em->len;
		em->orig_start = em->start;
		ptr = btrfs_file_extent_inline_start(item) + extent_offset;
		if (create == 0 && !PageUptodate(page)) {
			if (btrfs_file_extent_compression(leaf, item) !=
			    BTRFS_COMPRESS_NONE) {
				ret = uncompress_inline(path, page, pg_offset,
							extent_offset, item);
				if (ret) {
					err = ret;
					goto out;
				}
			} else {
				map = kmap(page);
				read_extent_buffer(leaf, map + pg_offset, ptr,
						   copy_size);
				if (pg_offset + copy_size < PAGE_SIZE) {
					memset(map + pg_offset + copy_size, 0,
					       PAGE_SIZE - pg_offset -
					       copy_size);
				}
				kunmap(page);
			}
			flush_dcache_page(page);
		} else if (create && PageUptodate(page)) {
			BUG();
			if (!trans) {
				kunmap(page);
				free_extent_map(em);
				em = NULL;

				btrfs_release_path(path);
				trans = btrfs_join_transaction(root);

				if (IS_ERR(trans))
					return ERR_CAST(trans);
				goto again;
			}
			map = kmap(page);
			write_extent_buffer(leaf, map + pg_offset, ptr,
					    copy_size);
			kunmap(page);
			btrfs_mark_buffer_dirty(leaf);
		}
		set_extent_uptodate(io_tree, em->start,
				    extent_map_end(em) - 1, NULL, GFP_NOFS);
		goto insert;
	}
not_found:
	em->start = start;
	em->orig_start = start;
	em->len = len;
not_found_em:
	em->block_start = EXTENT_MAP_HOLE;
	set_bit(EXTENT_FLAG_VACANCY, &em->flags);
insert:
	btrfs_release_path(path);
	if (em->start > start || extent_map_end(em) <= start) {
		btrfs_err(fs_info,
			  "bad extent! em: [%llu %llu] passed [%llu %llu]",
			  em->start, em->len, start, len);
		err = -EIO;
		goto out;
	}

	err = 0;
	write_lock(&em_tree->lock);
	ret = add_extent_mapping(em_tree, em, 0);
	/* it is possible that someone inserted the extent into the tree
	 * while we had the lock dropped.  It is also possible that
	 * an overlapping map exists in the tree
	 */
	if (ret == -EEXIST) {
		struct extent_map *existing;

		ret = 0;

		existing = search_extent_mapping(em_tree, start, len);
		/*
		 * existing will always be non-NULL, since there must be
		 * extent causing the -EEXIST.
		 */
		if (existing->start == em->start &&
		    extent_map_end(existing) >= extent_map_end(em) &&
		    em->block_start == existing->block_start) {
			/*
			 * The existing extent map already encompasses the
			 * entire extent map we tried to add.
			 */
			free_extent_map(em);
			em = existing;
			err = 0;
		} else if (start >= extent_map_end(existing) ||
		    start <= existing->start) {
			/*
			 * The existing extent map is the one nearest to
			 * the [start, start + len) range which overlaps
			 */
			err = merge_extent_mapping(em_tree, existing,
						   em, start);
			free_extent_map(existing);
			if (err) {
				free_extent_map(em);
				em = NULL;
			}
		} else {
			free_extent_map(em);
			em = existing;
			err = 0;
		}
	}
	write_unlock(&em_tree->lock);
out:

	trace_btrfs_get_extent(root, inode, em);

	btrfs_free_path(path);
	if (trans) {
		ret = btrfs_end_transaction(trans);
		if (!err)
			err = ret;
	}
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	BUG_ON(!em); /* Error is always set */
	return em;
}
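
/*
 * Hypothetical caller sketch (editor's note; it mirrors the DIO path in
 * btrfs_get_blocks_direct() below):
 *
 *	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	block = em->block_start + (start - em->start);
 *	... map the IO using block ...
 *	free_extent_map(em);
 *
 * The caller always owns one reference on the returned map and must
 * drop it with free_extent_map().
 */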
struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
		struct page *page,
		size_t pg_offset, u64 start, u64 len,
		int create)
{
	struct extent_map *em;
	struct extent_map *hole_em = NULL;
	u64 range_start = start;
	u64 end;
	u64 found;
	u64 found_end;
	int err = 0;

	em = btrfs_get_extent(inode, page, pg_offset, start, len, create);
	if (IS_ERR(em))
		return em;
	if (em) {
		/*
		 * if our em maps to
		 * - a hole or
		 * - a pre-alloc extent,
		 * there might actually be delalloc bytes behind it.
		 */
		if (em->block_start != EXTENT_MAP_HOLE &&
		    !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			return em;
		else
			hole_em = em;
	}

	/* check to see if we've wrapped (len == -1 or similar) */
	end = start + len;
	if (end < start)
		end = (u64)-1;
	else
		end -= 1;

	em = NULL;

	/* ok, we didn't find anything, lets look for delalloc */
	found = count_range_bits(&inode->io_tree, &range_start,
				 end, len, EXTENT_DELALLOC, 1);
	found_end = range_start + found;
	if (found_end < range_start)
		found_end = (u64)-1;

	/*
	 * we didn't find anything useful, return
	 * the original results from get_extent()
	 */
	if (range_start > end || found_end <= start) {
		em = hole_em;
		hole_em = NULL;
		goto out;
	}

	/* adjust the range_start to make sure it doesn't
	 * go backwards from the start they passed in
	 */
	range_start = max(start, range_start);
	found = found_end - range_start;

	if (found > 0) {
		u64 hole_start = start;
		u64 hole_len = len;

		em = alloc_extent_map();
		if (!em) {
			err = -ENOMEM;
			goto out;
		}
		/*
		 * when btrfs_get_extent can't find anything it
		 * returns one huge hole
		 *
		 * make sure what it found really fits our range, and
		 * adjust to make sure it is based on the start from
		 * the caller
		 */
		if (hole_em) {
			u64 calc_end = extent_map_end(hole_em);

			if (calc_end <= start || (hole_em->start > end)) {
				free_extent_map(hole_em);
				hole_em = NULL;
			} else {
				hole_start = max(hole_em->start, start);
				hole_len = calc_end - hole_start;
			}
		}
		em->bdev = NULL;
		if (hole_em && range_start > hole_start) {
			/* our hole starts before our delalloc, so we
			 * have to return just the parts of the hole
			 * that go until the delalloc starts
			 */
			em->len = min(hole_len,
				      range_start - hole_start);
			em->start = hole_start;
			em->orig_start = hole_start;
			/*
			 * don't adjust block start at all,
			 * it is fixed at EXTENT_MAP_HOLE
			 */
			em->block_start = hole_em->block_start;
			em->block_len = hole_len;
			if (test_bit(EXTENT_FLAG_PREALLOC, &hole_em->flags))
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		} else {
			em->start = range_start;
			em->len = found;
			em->orig_start = range_start;
			em->block_start = EXTENT_MAP_DELALLOC;
			em->block_len = found;
		}
	} else if (hole_em) {
		return hole_em;
	}
out:

	free_extent_map(hole_em);
	if (err) {
		free_extent_map(em);
		return ERR_PTR(err);
	}
	return em;
}
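
/*
 * Illustration (editor's note, hypothetical numbers): for a file with a
 * hole over [0, 64K) and buffered delalloc data at [16K, 32K), fiemap
 * first gets one big hole em from btrfs_get_extent(); count_range_bits()
 * then reports the delalloc run, so this helper returns [0, 16K) as a
 * hole and a synthetic EXTENT_MAP_DELALLOC mapping for [16K, 32K) on the
 * following call.
 */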
static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
						  const u64 start,
						  const u64 len,
						  const u64 orig_start,
						  const u64 block_start,
						  const u64 block_len,
						  const u64 orig_block_len,
						  const u64 ram_bytes,
						  const int type)
{
	struct extent_map *em = NULL;
	int ret;

	if (type != BTRFS_ORDERED_NOCOW) {
		em = create_io_em(inode, start, len, orig_start,
				  block_start, block_len, orig_block_len,
				  ram_bytes,
				  BTRFS_COMPRESS_NONE, /* compress_type */
				  type);
		if (IS_ERR(em))
			goto out;
	}
	ret = btrfs_add_ordered_extent_dio(inode, start, block_start,
					   len, block_len, type);
	if (ret) {
		if (em) {
			free_extent_map(em);
			btrfs_drop_extent_cache(BTRFS_I(inode), start,
						start + len - 1, 0);
		}
		em = ERR_PTR(ret);
	}
out:

	return em;
}
static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
						  u64 start, u64 len)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct btrfs_key ins;
	u64 alloc_hint;
	int ret;

	alloc_hint = get_extent_allocation_hint(inode, start, len);
	ret = btrfs_reserve_extent(root, len, len, fs_info->sectorsize,
				   0, alloc_hint, &ins, 1, 1);
	if (ret)
		return ERR_PTR(ret);

	em = btrfs_create_dio_extent(inode, start, ins.offset, start,
				     ins.objectid, ins.offset, ins.offset,
				     ins.offset, BTRFS_ORDERED_REGULAR);
	btrfs_dec_block_group_reservations(fs_info, ins.objectid);
	if (IS_ERR(em))
		btrfs_free_reserved_extent(fs_info, ins.objectid,
					   ins.offset, 1);

	return em;
}
/*
 * returns 1 when the nocow is safe, < 1 on error, 0 if the
 * block must be cow'd
 */
noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
			      u64 *orig_start, u64 *orig_block_len,
			      u64 *ram_bytes)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_path *path;
	int ret;
	struct extent_buffer *leaf;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 disk_bytenr;
	u64 backref_offset;
	u64 extent_end;
	u64 num_bytes;
	int slot;
	int found_type;
	bool nocow = (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_lookup_file_extent(NULL, root, path,
			btrfs_ino(BTRFS_I(inode)), offset, 0);
	if (ret < 0)
		goto out;

	slot = path->slots[0];
	if (ret == 1) {
		if (slot == 0) {
			/* can't find the item, must cow */
			ret = 0;
			goto out;
		}
		slot--;
	}
	ret = 0;
	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(BTRFS_I(inode)) ||
	    key.type != BTRFS_EXTENT_DATA_KEY) {
		/* not our file or wrong item type, must cow */
		goto out;
	}

	if (key.offset > offset) {
		/* Wrong offset, must cow */
		goto out;
	}

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	found_type = btrfs_file_extent_type(leaf, fi);
	if (found_type != BTRFS_FILE_EXTENT_REG &&
	    found_type != BTRFS_FILE_EXTENT_PREALLOC) {
		/* not a regular extent, must cow */
		goto out;
	}

	if (!nocow && found_type == BTRFS_FILE_EXTENT_REG)
		goto out;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if (extent_end <= offset)
		goto out;

	disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	if (disk_bytenr == 0)
		goto out;

	if (btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		goto out;

	backref_offset = btrfs_file_extent_offset(leaf, fi);

	if (orig_start) {
		*orig_start = key.offset - backref_offset;
		*orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		*ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	}

	if (btrfs_extent_readonly(fs_info, disk_bytenr))
		goto out;

	num_bytes = min(offset + *len, extent_end) - offset;
	if (!nocow && found_type == BTRFS_FILE_EXTENT_PREALLOC) {
		u64 range_end;

		range_end = round_up(offset + num_bytes,
				     root->fs_info->sectorsize) - 1;
		ret = test_range_bit(io_tree, offset, range_end,
				     EXTENT_DELALLOC, 0, NULL);
		if (ret) {
			ret = -EAGAIN;
			goto out;
		}
	}

	btrfs_release_path(path);

	/*
	 * look for other files referencing this extent, if we
	 * find any we must cow
	 */

	ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
				    key.offset - backref_offset, disk_bytenr);
	if (ret) {
		ret = 0;
		goto out;
	}

	/*
	 * adjust disk_bytenr and num_bytes to cover just the bytes
	 * in this extent we are about to write.  If there
	 * are any csums in that range we have to cow in order
	 * to keep the csums correct
	 */
	disk_bytenr += backref_offset;
	disk_bytenr += offset - key.offset;
	if (csum_exist_in_range(fs_info, disk_bytenr, num_bytes))
		goto out;
	/*
	 * all of the above have passed, it is safe to overwrite this extent
	 * without cow
	 */
	*len = num_bytes;
	ret = 1;
out:
	btrfs_free_path(path);
	return ret;
}
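
/*
 * Hypothetical caller sketch (editor's note, not from the original file):
 *
 *	u64 len = write_bytes, orig_start, orig_block_len, ram_bytes;
 *
 *	if (can_nocow_extent(inode, pos, &len, &orig_start,
 *			     &orig_block_len, &ram_bytes) == 1) {
 *		... safe to overwrite in place; len may have been trimmed ...
 *	} else {
 *		... must fall back to a COW allocation ...
 *	}
 */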
bool btrfs_page_exists_in_range(struct inode *inode, loff_t start, loff_t end)
{
	struct radix_tree_root *root = &inode->i_mapping->page_tree;
	bool found = false;
	void **pagep = NULL;
	struct page *page = NULL;
	unsigned long start_idx;
	unsigned long end_idx;

	start_idx = start >> PAGE_SHIFT;

	/*
	 * end is the last byte in the last page.  end == start is legal
	 */
	end_idx = end >> PAGE_SHIFT;

	rcu_read_lock();

	/* Most of the code in this while loop is lifted from
	 * find_get_page.  It's been modified to begin searching from a
	 * page and return just the first page found in that range.  If the
	 * found idx is less than or equal to the end idx then we know that
	 * a page exists.  If no pages are found or if those pages are
	 * outside of the range then we're fine (yay!) */
	while (page == NULL &&
	       radix_tree_gang_lookup_slot(root, &pagep, NULL, start_idx, 1)) {
		page = radix_tree_deref_slot(pagep);
		if (unlikely(!page))
			break;

		if (radix_tree_exception(page)) {
			if (radix_tree_deref_retry(page)) {
				page = NULL;
				continue;
			}
			/*
			 * Otherwise, shmem/tmpfs must be storing a swap entry
			 * here as an exceptional entry: so return it without
			 * attempting to raise page count.
			 */
			page = NULL;
			break; /* TODO: Is this relevant for this use case? */
		}

		if (!page_cache_get_speculative(page)) {
			page = NULL;
			continue;
		}

		/*
		 * Has the page moved?
		 * This is part of the lockless pagecache protocol. See
		 * include/linux/pagemap.h for details.
		 */
		if (unlikely(page != *pagep)) {
			put_page(page);
			page = NULL;
		}
	}

	if (page) {
		if (page->index <= end_idx)
			found = true;
		put_page(page);
	}

	rcu_read_unlock();
	return found;
}
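
/*
 * Editor's note: the speculative get plus recheck above is the standard
 * lockless pagecache pattern.  If the page is freed and its slot reused
 * between radix_tree_deref_slot() and page_cache_get_speculative(), the
 * "page != *pagep" test catches the race and the lookup simply retries.
 */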
static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
			      struct extent_state **cached_state, int writing)
{
	struct btrfs_ordered_extent *ordered;
	int ret = 0;

	while (1) {
		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 cached_state);
		/*
		 * We're concerned with the entire range that we're going to be
		 * doing DIO to, so we need to make sure there's no ordered
		 * extents in this range.
		 */
		ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), lockstart,
						     lockend - lockstart + 1);

		/*
		 * We need to make sure there are no buffered pages in this
		 * range either, we could have raced between the invalidate in
		 * generic_file_direct_write and locking the extent.  The
		 * invalidate needs to happen so that reads after a write do not
		 * get stale data.
		 */
		if (!ordered &&
		    (!writing ||
		     !btrfs_page_exists_in_range(inode, lockstart, lockend)))
			break;

		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				     cached_state, GFP_NOFS);

		if (ordered) {
			/*
			 * If we are doing a DIO read and the ordered extent we
			 * found is for a buffered write, we can not wait for it
			 * to complete and retry, because if we do so we can
			 * deadlock with concurrent buffered writes on page
			 * locks.  This happens only if our DIO read covers more
			 * than one extent map, if at this point has already
			 * created an ordered extent for a previous extent map
			 * and locked its range in the inode's io tree, and a
			 * concurrent write against that previous extent map's
			 * range and this range started (we unlock the ranges
			 * in the io tree only when the bios complete and
			 * buffered writes always lock pages before attempting
			 * to lock range in the io tree).
			 */
			if (writing ||
			    test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags))
				btrfs_start_ordered_extent(inode, ordered, 1);
			else
				ret = -ENOTBLK;
			btrfs_put_ordered_extent(ordered);
		} else {
			/*
			 * We could trigger writeback for this range (and wait
			 * for it to complete) and then invalidate the pages for
			 * this range (through invalidate_inode_pages2_range()),
			 * but that can lead us to a deadlock with a concurrent
			 * call to readpages() (a buffered read or a defrag call
			 * triggered a readahead) on a page lock due to an
			 * ordered dio extent we created before but did not have
			 * yet a corresponding bio submitted (whence it can not
			 * complete), which makes readpages() wait for that
			 * ordered extent to complete while holding a lock on
			 * that page.
			 */
			ret = -ENOTBLK;
		}

		if (ret)
			break;

		cond_resched();
	}

	return ret;
}
/* The callers of this must take lock_extent() */
static struct extent_map *create_io_em(struct inode *inode, u64 start, u64 len,
				       u64 orig_start, u64 block_start,
				       u64 block_len, u64 orig_block_len,
				       u64 ram_bytes, int compress_type,
				       int type)
{
	struct extent_map_tree *em_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret;

	ASSERT(type == BTRFS_ORDERED_PREALLOC ||
	       type == BTRFS_ORDERED_COMPRESSED ||
	       type == BTRFS_ORDERED_NOCOW ||
	       type == BTRFS_ORDERED_REGULAR);

	em_tree = &BTRFS_I(inode)->extent_tree;
	em = alloc_extent_map();
	if (!em)
		return ERR_PTR(-ENOMEM);

	em->start = start;
	em->orig_start = orig_start;
	em->len = len;
	em->block_len = block_len;
	em->block_start = block_start;
	em->bdev = root->fs_info->fs_devices->latest_bdev;
	em->orig_block_len = orig_block_len;
	em->ram_bytes = ram_bytes;
	em->generation = -1;
	set_bit(EXTENT_FLAG_PINNED, &em->flags);
	if (type == BTRFS_ORDERED_PREALLOC) {
		set_bit(EXTENT_FLAG_FILLING, &em->flags);
	} else if (type == BTRFS_ORDERED_COMPRESSED) {
		set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		em->compress_type = compress_type;
	}

	do {
		btrfs_drop_extent_cache(BTRFS_I(inode), em->start,
					em->start + em->len - 1, 0);
		write_lock(&em_tree->lock);
		ret = add_extent_mapping(em_tree, em, 1);
		write_unlock(&em_tree->lock);
		/*
		 * The caller has taken lock_extent(), who could race with us
		 * to add em?
		 */
	} while (ret == -EEXIST);

	if (ret) {
		free_extent_map(em);
		return ERR_PTR(ret);
	}

	/* em got 2 refs now, callers needs to do free_extent_map once. */
	return em;
}
static void adjust_dio_outstanding_extents(struct inode *inode,
					   struct btrfs_dio_data *dio_data,
					   const u64 len)
{
	unsigned num_extents = count_max_extents(len);

	/*
	 * If we have an outstanding_extents count still set then we're
	 * within our reservation, otherwise we need to adjust our inode
	 * counter appropriately.
	 */
	if (dio_data->outstanding_extents >= num_extents) {
		dio_data->outstanding_extents -= num_extents;
	} else {
		/*
		 * If dio write length has been split due to no large enough
		 * contiguous space, we need to compensate our inode counter
		 * appropriately.
		 */
		u64 num_needed = num_extents - dio_data->outstanding_extents;

		spin_lock(&BTRFS_I(inode)->lock);
		BTRFS_I(inode)->outstanding_extents += num_needed;
		spin_unlock(&BTRFS_I(inode)->lock);
	}
}
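
/*
 * Worked example (editor's note): count_max_extents() rounds up against
 * BTRFS_MAX_EXTENT_SIZE (128M), so a 300M DIO write reserves 3
 * outstanding extents.  If the allocator hands the range back as five
 * smaller chunks of one extent each, the first three decrement
 * dio_data->outstanding_extents down to zero and the last two take the
 * else branch, adding the missing 2 to the inode counter so the release
 * path stays balanced.
 */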
static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
				   struct buffer_head *bh_result, int create)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	struct btrfs_dio_data *dio_data = NULL;
	u64 start = iblock << inode->i_blkbits;
	u64 lockstart, lockend;
	u64 len = bh_result->b_size;
	int unlock_bits = EXTENT_LOCKED;
	int ret = 0;

	if (create)
		unlock_bits |= EXTENT_DIRTY;
	else
		len = min_t(u64, len, fs_info->sectorsize);

	lockstart = start;
	lockend = start + len - 1;

	if (current->journal_info) {
		/*
		 * Need to pull our outstanding extents and set journal_info to NULL so
		 * that anything that needs to check if there's a transaction doesn't get
		 * confused.
		 */
		dio_data = current->journal_info;
		current->journal_info = NULL;
	}

	/*
	 * If this errors out it's because we couldn't invalidate pagecache for
	 * this range and we need to fallback to buffered.
	 */
	if (lock_extent_direct(inode, lockstart, lockend, &cached_state,
			       create)) {
		ret = -ENOTBLK;
		goto err;
	}

	em = btrfs_get_extent(BTRFS_I(inode), NULL, 0, start, len, 0);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}

	/*
	 * Ok for INLINE and COMPRESSED extents we need to fallback on buffered
	 * io.  INLINE is special, and we could probably kludge it in here, but
	 * it's still buffered so for safety lets just fall back to the generic
	 * buffered path.
	 *
	 * For COMPRESSED we _have_ to read the entire extent in so we can
	 * decompress it, so there will be buffering required no matter what we
	 * do, so go ahead and fallback to buffered.
	 *
	 * We return -ENOTBLK because that's what makes DIO go ahead and go back
	 * to buffered IO.  Don't blame me, this is the price we pay for using
	 * the generic code.
	 */
	if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags) ||
	    em->block_start == EXTENT_MAP_INLINE) {
		free_extent_map(em);
		ret = -ENOTBLK;
		goto unlock_err;
	}

	/* Just a good old fashioned hole, return */
	if (!create && (em->block_start == EXTENT_MAP_HOLE ||
			test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
		free_extent_map(em);
		goto unlock_err;
	}

	/*
	 * We don't allocate a new extent in the following cases
	 *
	 * 1) The inode is marked as NODATACOW.  In this case we'll just use the
	 * existing extent.
	 * 2) The extent is marked as PREALLOC.  We're good to go here and can
	 * just use the extent.
	 *
	 */
	if (!create) {
		len = min(len, em->len - (start - em->start));
		lockstart = start + len;
		goto unlock;
	}

	if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags) ||
	    ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
	     em->block_start != EXTENT_MAP_HOLE)) {
		int type;
		u64 block_start, orig_start, orig_block_len, ram_bytes;

		if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			type = BTRFS_ORDERED_PREALLOC;
		else
			type = BTRFS_ORDERED_NOCOW;
		len = min(len, em->len - (start - em->start));
		block_start = em->block_start + (start - em->start);

		if (can_nocow_extent(inode, start, &len, &orig_start,
				     &orig_block_len, &ram_bytes) == 1 &&
		    btrfs_inc_nocow_writers(fs_info, block_start)) {
			struct extent_map *em2;

			em2 = btrfs_create_dio_extent(inode, start, len,
						      orig_start, block_start,
						      len, orig_block_len,
						      ram_bytes, type);
			btrfs_dec_nocow_writers(fs_info, block_start);
			if (type == BTRFS_ORDERED_PREALLOC) {
				free_extent_map(em);
				em = em2;
			}
			if (em2 && IS_ERR(em2)) {
				ret = PTR_ERR(em2);
				goto unlock_err;
			}
			/*
			 * For inode marked NODATACOW or extent marked PREALLOC,
			 * use the existing or preallocated extent, so does not
			 * need to adjust btrfs_space_info's bytes_may_use.
			 */
			btrfs_free_reserved_data_space_noquota(inode,
					start, len);
			goto unlock;
		}
	}

	/*
	 * this will cow the extent, reset the len in case we changed
	 * it above
	 */
	len = bh_result->b_size;
	free_extent_map(em);
	em = btrfs_new_extent_direct(inode, start, len);
	if (IS_ERR(em)) {
		ret = PTR_ERR(em);
		goto unlock_err;
	}
	len = min(len, em->len - (start - em->start));
unlock:
	bh_result->b_blocknr = (em->block_start + (start - em->start)) >>
		inode->i_blkbits;
	bh_result->b_size = len;
	bh_result->b_bdev = em->bdev;
	set_buffer_mapped(bh_result);
	if (create) {
		if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
			set_buffer_new(bh_result);

		/*
		 * Need to update the i_size under the extent lock so buffered
		 * readers will get the updated i_size when we unlock.
		 */
		if (!dio_data->overwrite && start + len > i_size_read(inode))
			i_size_write(inode, start + len);

		adjust_dio_outstanding_extents(inode, dio_data, len);
		WARN_ON(dio_data->reserve < len);
		dio_data->reserve -= len;
		dio_data->unsubmitted_oe_range_end = start + len;
		current->journal_info = dio_data;
	}

	/*
	 * In the case of write we need to clear and unlock the entire range,
	 * in the case of read we need to unlock only the end area that we
	 * aren't using if there is any left over space.
	 */
	if (lockstart < lockend) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
				 lockend, unlock_bits, 1, 0,
				 &cached_state, GFP_NOFS);
	} else {
		free_extent_state(cached_state);
	}

	free_extent_map(em);

	return 0;

unlock_err:
	clear_extent_bit(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			 unlock_bits, 1, 0, &cached_state, GFP_NOFS);
err:
	if (dio_data)
		current->journal_info = dio_data;
	/*
	 * Compensate the delalloc release we do in btrfs_direct_IO() when we
	 * write less data than expected, so that we don't underflow our inode's
	 * outstanding extents counter.
	 */
	if (create && dio_data)
		adjust_dio_outstanding_extents(inode, dio_data, len);

	return ret;
}
static inline int submit_dio_repair_bio(struct inode *inode, struct bio *bio,
					int mirror_num)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int ret;

	BUG_ON(bio_op(bio) == REQ_OP_WRITE);

	bio_get(bio);

	ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DIO_REPAIR);
	if (ret)
		goto err;

	ret = btrfs_map_bio(fs_info, bio, mirror_num, 0);
err:
	bio_put(bio);
	return ret;
}
static int btrfs_check_dio_repairable(struct inode *inode,
				      struct bio *failed_bio,
				      struct io_failure_record *failrec,
				      int failed_mirror)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	int num_copies;

	num_copies = btrfs_num_copies(fs_info, failrec->logical, failrec->len);
	if (num_copies == 1) {
		/*
		 * we only have a single copy of the data, so don't bother with
		 * all the retry and error correction code that follows. no
		 * matter what the error is, it is very likely to persist.
		 */
		btrfs_debug(fs_info,
			"Check DIO Repairable: cannot repair, num_copies=%d, next_mirror %d, failed_mirror %d",
			num_copies, failrec->this_mirror, failed_mirror);
		return 0;
	}

	failrec->failed_mirror = failed_mirror;
	failrec->this_mirror++;
	if (failrec->this_mirror == failed_mirror)
		failrec->this_mirror++;

	if (failrec->this_mirror > num_copies) {
		btrfs_debug(fs_info,
			"Check DIO Repairable: (fail) num_copies=%d, next_mirror %d, failed_mirror %d",
			num_copies, failrec->this_mirror, failed_mirror);
		return 0;
	}

	return 1;
}
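
/*
 * Illustration (editor's note): on RAID1, num_copies == 2.  A read that
 * failed on mirror 1 enters with this_mirror == 0; the increments above
 * skip the failed mirror, so the retry is sent to mirror 2.  Once
 * this_mirror would exceed num_copies, every copy has been tried and the
 * repair gives up.
 */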
static int dio_read_error(struct inode *inode, struct bio *failed_bio,
			struct page *page, unsigned int pgoff,
			u64 start, u64 end, int failed_mirror,
			bio_end_io_t *repair_endio, void *repair_arg)
{
	struct io_failure_record *failrec;
	struct bio *bio;
	int isector;
	int read_mode = 0;
	int ret;

	BUG_ON(bio_op(failed_bio) == REQ_OP_WRITE);

	ret = btrfs_get_io_failure_record(inode, start, end, &failrec);
	if (ret)
		return ret;

	ret = btrfs_check_dio_repairable(inode, failed_bio, failrec,
					 failed_mirror);
	if (!ret) {
		free_io_failure(BTRFS_I(inode), failrec);
		return -EIO;
	}

	if ((failed_bio->bi_vcnt > 1)
		|| (failed_bio->bi_io_vec->bv_len
			> btrfs_inode_sectorsize(inode)))
		read_mode |= REQ_FAILFAST_DEV;

	isector = start - btrfs_io_bio(failed_bio)->logical;
	isector >>= inode->i_sb->s_blocksize_bits;
	bio = btrfs_create_repair_bio(inode, failed_bio, failrec, page,
				pgoff, isector, repair_endio, repair_arg);
	if (!bio) {
		free_io_failure(BTRFS_I(inode), failrec);
		return -EIO;
	}
	bio_set_op_attrs(bio, REQ_OP_READ, read_mode);

	btrfs_debug(BTRFS_I(inode)->root->fs_info,
		    "Repair DIO Read Error: submitting new dio read[%#x] to this_mirror=%d, in_validation=%d",
		    read_mode, failrec->this_mirror, failrec->in_validation);

	ret = submit_dio_repair_bio(inode, bio, failrec->this_mirror);
	if (ret) {
		free_io_failure(BTRFS_I(inode), failrec);
		bio_put(bio);
	}

	return ret;
}
struct btrfs_retry_complete {
	struct completion done;
	struct inode *inode;
	u64 start;
	int uptodate;
};

static void btrfs_retry_endio_nocsum(struct bio *bio)
{
	struct btrfs_retry_complete *done = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	if (bio->bi_error)
		goto end;

	ASSERT(bio->bi_vcnt == 1);
	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));

	done->uptodate = 1;
	bio_for_each_segment_all(bvec, bio, i)
		clean_io_failure(BTRFS_I(done->inode), done->start,
				 bvec->bv_page, 0);
end:
	complete(&done->done);
	bio_put(bio);
}

static int __btrfs_correct_data_nocsum(struct inode *inode,
				       struct btrfs_io_bio *io_bio)
{
	struct btrfs_fs_info *fs_info;
	struct bio_vec *bvec;
	struct btrfs_retry_complete done;
	u64 start;
	unsigned int pgoff;
	u32 sectorsize;
	int nr_sectors;
	int i;
	int ret;

	fs_info = BTRFS_I(inode)->root->fs_info;
	sectorsize = fs_info->sectorsize;

	start = io_bio->logical;
	done.inode = inode;

	bio_for_each_segment_all(bvec, &io_bio->bio, i) {
		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);
		pgoff = bvec->bv_offset;

next_block_or_try_again:
		done.uptodate = 0;
		done.start = start;
		init_completion(&done.done);

		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
				pgoff, start, start + sectorsize - 1,
				io_bio->mirror_num,
				btrfs_retry_endio_nocsum, &done);
		if (ret)
			return ret;

		wait_for_completion(&done.done);

		if (!done.uptodate) {
			/* We might have another mirror, so try again */
			goto next_block_or_try_again;
		}

		start += sectorsize;

		nr_sectors--;
		if (nr_sectors) {
			pgoff += sectorsize;
			ASSERT(pgoff < PAGE_SIZE);
			goto next_block_or_try_again;
		}
	}

	return 0;
}

static void btrfs_retry_endio(struct bio *bio)
{
	struct btrfs_retry_complete *done = bio->bi_private;
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
	struct bio_vec *bvec;
	int uptodate;
	int ret;
	int i;

	if (bio->bi_error)
		goto end;

	uptodate = 1;

	ASSERT(bio->bi_vcnt == 1);
	ASSERT(bio->bi_io_vec->bv_len == btrfs_inode_sectorsize(done->inode));

	bio_for_each_segment_all(bvec, bio, i) {
		ret = __readpage_endio_check(done->inode, io_bio, i,
					bvec->bv_page, bvec->bv_offset,
					done->start, bvec->bv_len);
		if (!ret)
			clean_io_failure(BTRFS_I(done->inode), done->start,
					bvec->bv_page, bvec->bv_offset);
		else
			uptodate = 0;
	}

	done->uptodate = uptodate;
end:
	complete(&done->done);
	bio_put(bio);
}

static int __btrfs_subio_endio_read(struct inode *inode,
				    struct btrfs_io_bio *io_bio, int err)
{
	struct btrfs_fs_info *fs_info;
	struct bio_vec *bvec;
	struct btrfs_retry_complete done;
	u64 start;
	u64 offset = 0;
	u32 sectorsize;
	int nr_sectors;
	unsigned int pgoff;
	int csum_pos;
	int i;
	int ret;

	fs_info = BTRFS_I(inode)->root->fs_info;
	sectorsize = fs_info->sectorsize;

	err = 0;
	start = io_bio->logical;
	done.inode = inode;

	bio_for_each_segment_all(bvec, &io_bio->bio, i) {
		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info, bvec->bv_len);

		pgoff = bvec->bv_offset;
next_block:
		csum_pos = BTRFS_BYTES_TO_BLKS(fs_info, offset);
		ret = __readpage_endio_check(inode, io_bio, csum_pos,
					bvec->bv_page, pgoff, start,
					sectorsize);
		if (likely(!ret))
			goto next;
try_again:
		done.uptodate = 0;
		done.start = start;
		init_completion(&done.done);

		ret = dio_read_error(inode, &io_bio->bio, bvec->bv_page,
				pgoff, start, start + sectorsize - 1,
				io_bio->mirror_num,
				btrfs_retry_endio, &done);
		if (ret) {
			err = ret;
			goto next;
		}

		wait_for_completion(&done.done);

		if (!done.uptodate) {
			/* We might have another mirror, so try again */
			goto try_again;
		}
next:
		offset += sectorsize;
		start += sectorsize;

		ASSERT(nr_sectors);

		nr_sectors--;
		if (nr_sectors) {
			pgoff += sectorsize;
			ASSERT(pgoff < PAGE_SIZE);
			goto next_block;
		}
	}

	return err;
}

static int btrfs_subio_endio_read(struct inode *inode,
				  struct btrfs_io_bio *io_bio, int err)
{
	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	if (skip_csum) {
		if (unlikely(err))
			return __btrfs_correct_data_nocsum(inode, io_bio);
		else
			return 0;
	} else {
		return __btrfs_subio_endio_read(inode, io_bio, err);
	}
}

static void btrfs_endio_direct_read(struct bio *bio)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct inode *inode = dip->inode;
	struct bio *dio_bio;
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
	int err = bio->bi_error;

	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
		err = btrfs_subio_endio_read(inode, io_bio, err);

	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
		      dip->logical_offset + dip->bytes - 1);
	dio_bio = dip->dio_bio;

	kfree(dip);

	dio_bio->bi_error = bio->bi_error;
	dio_end_io(dio_bio, bio->bi_error);

	if (io_bio->end_io)
		io_bio->end_io(io_bio, err);
	bio_put(bio);
}

static void btrfs_endio_direct_write_update_ordered(struct inode *inode,
						    const u64 offset,
						    const u64 bytes,
						    const int uptodate)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_extent *ordered = NULL;
	u64 ordered_offset = offset;
	u64 ordered_bytes = bytes;
	int ret;

again:
	ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
						   &ordered_offset,
						   ordered_bytes,
						   uptodate);
	if (!ret)
		goto out_test;

	btrfs_init_work(&ordered->work, btrfs_endio_write_helper,
			finish_ordered_fn, NULL, NULL);
	btrfs_queue_work(fs_info->endio_write_workers, &ordered->work);
out_test:
	/*
	 * our bio might span multiple ordered extents. If we haven't
	 * completed the accounting for the whole dio, go back and try again
	 */
	if (ordered_offset < offset + bytes) {
		ordered_bytes = offset + bytes - ordered_offset;
		ordered = NULL;
		goto again;
	}
}

static void btrfs_endio_direct_write(struct bio *bio)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	struct bio *dio_bio = dip->dio_bio;

	btrfs_endio_direct_write_update_ordered(dip->inode,
						dip->logical_offset,
						dip->bytes,
						!bio->bi_error);

	kfree(dip);

	dio_bio->bi_error = bio->bi_error;
	dio_end_io(dio_bio, bio->bi_error);
	bio_put(bio);
}

static int __btrfs_submit_bio_start_direct_io(struct inode *inode,
				    struct bio *bio, int mirror_num,
				    unsigned long bio_flags, u64 offset)
{
	int ret;

	ret = btrfs_csum_one_bio(inode, bio, offset, 1);
	BUG_ON(ret); /* -ENOMEM */
	return 0;
}

static void btrfs_end_dio_bio(struct bio *bio)
{
	struct btrfs_dio_private *dip = bio->bi_private;
	int err = bio->bi_error;

	if (err)
		btrfs_warn(BTRFS_I(dip->inode)->root->fs_info,
			   "direct IO failed ino %llu rw %d,%u sector %#Lx len %u err no %d",
			   btrfs_ino(BTRFS_I(dip->inode)), bio_op(bio),
			   bio->bi_opf,
			   (unsigned long long)bio->bi_iter.bi_sector,
			   bio->bi_iter.bi_size, err);

	if (dip->subio_endio)
		err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);

	if (err) {
		dip->errors = 1;

		/*
		 * before atomic variable goto zero, we must make sure
		 * dip->errors is perceived to be set.
		 */
		smp_mb__before_atomic();
	}

	/* if there are more bios still pending for this dio, just exit */
	if (!atomic_dec_and_test(&dip->pending_bios))
		goto out;

	if (dip->errors) {
		bio_io_error(dip->orig_bio);
	} else {
		dip->dio_bio->bi_error = 0;
		bio_endio(dip->orig_bio);
	}
out:
	bio_put(bio);
}

static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
				       u64 first_sector, gfp_t gfp_flags)
{
	struct bio *bio;

	bio = btrfs_bio_alloc(bdev, first_sector, BIO_MAX_PAGES, gfp_flags);
	if (bio)
		bio_associate_current(bio);
	return bio;
}

static inline int btrfs_lookup_and_bind_dio_csum(struct inode *inode,
						 struct btrfs_dio_private *dip,
						 struct bio *bio,
						 u64 file_offset)
{
	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
	struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
	int ret;

	/*
	 * We load all the csum data we need when we submit
	 * the first bio to reduce the csum tree search and
	 * contention.
	 */
	if (dip->logical_offset == file_offset) {
		ret = btrfs_lookup_bio_sums_dio(inode, dip->orig_bio,
						file_offset);
		if (ret)
			return ret;
	}

	if (bio == dip->orig_bio)
		return 0;

	file_offset -= dip->logical_offset;
	file_offset >>= inode->i_sb->s_blocksize_bits;
	io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);

	return 0;
}

static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
					 u64 file_offset, int skip_sum,
					 int async_submit)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_dio_private *dip = bio->bi_private;
	bool write = bio_op(bio) == REQ_OP_WRITE;
	int ret;

	if (async_submit)
		async_submit = !atomic_read(&BTRFS_I(inode)->sync_writers);

	bio_get(bio);

	if (!write) {
		ret = btrfs_bio_wq_end_io(fs_info, bio, BTRFS_WQ_ENDIO_DATA);
		if (ret)
			goto err;
	}

	if (skip_sum)
		goto map;

	if (write && async_submit) {
		ret = btrfs_wq_submit_bio(fs_info, inode, bio, 0, 0,
					  file_offset,
					  __btrfs_submit_bio_start_direct_io,
					  __btrfs_submit_bio_done);
		goto err;
	} else if (write) {
		/*
		 * If we aren't doing async submit, calculate the csum of the
		 * bio now.
		 */
		ret = btrfs_csum_one_bio(inode, bio, file_offset, 1);
		if (ret)
			goto err;
	} else {
		ret = btrfs_lookup_and_bind_dio_csum(inode, dip, bio,
						     file_offset);
		if (ret)
			goto err;
	}
map:
	ret = btrfs_map_bio(fs_info, bio, 0, async_submit);
err:
	bio_put(bio);
	return ret;
}
*dip
,
8325 struct inode
*inode
= dip
->inode
;
8326 struct btrfs_fs_info
*fs_info
= btrfs_sb(inode
->i_sb
);
8327 struct btrfs_root
*root
= BTRFS_I(inode
)->root
;
8329 struct bio
*orig_bio
= dip
->orig_bio
;
8330 struct bio_vec
*bvec
;
8331 u64 start_sector
= orig_bio
->bi_iter
.bi_sector
;
8332 u64 file_offset
= dip
->logical_offset
;
8335 u32 blocksize
= fs_info
->sectorsize
;
8336 int async_submit
= 0;
8341 map_length
= orig_bio
->bi_iter
.bi_size
;
8342 ret
= btrfs_map_block(fs_info
, btrfs_op(orig_bio
), start_sector
<< 9,
8343 &map_length
, NULL
, 0);
8347 if (map_length
>= orig_bio
->bi_iter
.bi_size
) {
8349 dip
->flags
|= BTRFS_DIO_ORIG_BIO_SUBMITTED
;
8353 /* async crcs make it difficult to collect full stripe writes. */
8354 if (btrfs_get_alloc_profile(root
, 1) & BTRFS_BLOCK_GROUP_RAID56_MASK
)
8359 bio
= btrfs_dio_bio_alloc(orig_bio
->bi_bdev
, start_sector
, GFP_NOFS
);
8363 bio
->bi_opf
= orig_bio
->bi_opf
;
8364 bio
->bi_private
= dip
;
8365 bio
->bi_end_io
= btrfs_end_dio_bio
;
8366 btrfs_io_bio(bio
)->logical
= file_offset
;
8367 atomic_inc(&dip
->pending_bios
);
8369 bio_for_each_segment_all(bvec
, orig_bio
, j
) {
8370 nr_sectors
= BTRFS_BYTES_TO_BLKS(fs_info
, bvec
->bv_len
);
8373 if (unlikely(map_length
< submit_len
+ blocksize
||
8374 bio_add_page(bio
, bvec
->bv_page
, blocksize
,
8375 bvec
->bv_offset
+ (i
* blocksize
)) < blocksize
)) {
8377 * inc the count before we submit the bio so
8378 * we know the end IO handler won't happen before
8379 * we inc the count. Otherwise, the dip might get freed
8380 * before we're done setting it up
8382 atomic_inc(&dip
->pending_bios
);
8383 ret
= __btrfs_submit_dio_bio(bio
, inode
,
8384 file_offset
, skip_sum
,
8388 atomic_dec(&dip
->pending_bios
);
8392 start_sector
+= submit_len
>> 9;
8393 file_offset
+= submit_len
;
8397 bio
= btrfs_dio_bio_alloc(orig_bio
->bi_bdev
,
8398 start_sector
, GFP_NOFS
);
8401 bio
->bi_opf
= orig_bio
->bi_opf
;
8402 bio
->bi_private
= dip
;
8403 bio
->bi_end_io
= btrfs_end_dio_bio
;
8404 btrfs_io_bio(bio
)->logical
= file_offset
;
8406 map_length
= orig_bio
->bi_iter
.bi_size
;
8407 ret
= btrfs_map_block(fs_info
, btrfs_op(orig_bio
),
8409 &map_length
, NULL
, 0);
8417 submit_len
+= blocksize
;
8426 ret
= __btrfs_submit_dio_bio(bio
, inode
, file_offset
, skip_sum
,
8435 * before atomic variable goto zero, we must
8436 * make sure dip->errors is perceived to be set.
8438 smp_mb__before_atomic();
8439 if (atomic_dec_and_test(&dip
->pending_bios
))
8440 bio_io_error(dip
->orig_bio
);
8442 /* bio_end_io() will handle error, so we needn't return it */
static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
				loff_t file_offset)
{
	struct btrfs_dio_private *dip = NULL;
	struct bio *io_bio = NULL;
	struct btrfs_io_bio *btrfs_bio;
	int skip_sum;
	bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
	int ret = 0;

	skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	io_bio = btrfs_bio_clone(dio_bio, GFP_NOFS);
	if (!io_bio) {
		ret = -ENOMEM;
		goto free_ordered;
	}

	dip = kzalloc(sizeof(*dip), GFP_NOFS);
	if (!dip) {
		ret = -ENOMEM;
		goto free_ordered;
	}

	dip->private = dio_bio->bi_private;
	dip->inode = inode;
	dip->logical_offset = file_offset;
	dip->bytes = dio_bio->bi_iter.bi_size;
	dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
	io_bio->bi_private = dip;
	dip->orig_bio = io_bio;
	dip->dio_bio = dio_bio;
	atomic_set(&dip->pending_bios, 0);
	btrfs_bio = btrfs_io_bio(io_bio);
	btrfs_bio->logical = file_offset;

	if (write) {
		io_bio->bi_end_io = btrfs_endio_direct_write;
	} else {
		io_bio->bi_end_io = btrfs_endio_direct_read;
		dip->subio_endio = btrfs_subio_endio_read;
	}

	/*
	 * Reset the range for unsubmitted ordered extents (to a 0 length range)
	 * even if we fail to submit a bio, because in such case we do the
	 * corresponding error handling below and it must not be done a second
	 * time by btrfs_direct_IO().
	 */
	if (write) {
		struct btrfs_dio_data *dio_data = current->journal_info;

		dio_data->unsubmitted_oe_range_end = dip->logical_offset +
			dip->bytes;
		dio_data->unsubmitted_oe_range_start =
			dio_data->unsubmitted_oe_range_end;
	}

	ret = btrfs_submit_direct_hook(dip, skip_sum);
	if (!ret)
		return;

	if (btrfs_bio->end_io)
		btrfs_bio->end_io(btrfs_bio, ret);

free_ordered:
	/*
	 * If we arrived here it means either we failed to submit the dip
	 * or we either failed to clone the dio_bio or failed to allocate the
	 * dip. If we cloned the dio_bio and allocated the dip, we can just
	 * call bio_endio against our io_bio so that we get proper resource
	 * cleanup if we fail to submit the dip, otherwise, we must do the
	 * same as btrfs_endio_direct_[write|read] because we can't call these
	 * callbacks - they require an allocated dip and a clone of dio_bio.
	 */
	if (io_bio && dip) {
		io_bio->bi_error = -EIO;
		bio_endio(io_bio);
		/*
		 * The end io callbacks free our dip, do the final put on io_bio
		 * and all the cleanup and final put for dio_bio (through
		 * dio_end_io()).
		 */
		dip = NULL;
		io_bio = NULL;
	} else {
		if (write)
			btrfs_endio_direct_write_update_ordered(inode,
						file_offset,
						dio_bio->bi_iter.bi_size,
						0);
		else
			unlock_extent(&BTRFS_I(inode)->io_tree, file_offset,
			      file_offset + dio_bio->bi_iter.bi_size - 1);

		dio_bio->bi_error = -EIO;
		/*
		 * Releases and cleans up our dio_bio, no need to bio_put()
		 * nor bio_endio()/bio_io_error() against dio_bio.
		 */
		dio_end_io(dio_bio, ret);
	}
	if (io_bio)
		bio_put(io_bio);
	kfree(dip);
}
static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
			       struct kiocb *iocb,
			       const struct iov_iter *iter, loff_t offset)
{
	int seg;
	int i;
	unsigned int blocksize_mask = fs_info->sectorsize - 1;
	ssize_t retval = -EINVAL;

	if (offset & blocksize_mask)
		goto out;

	if (iov_iter_alignment(iter) & blocksize_mask)
		goto out;

	/* If this is a write we don't need to check anymore */
	if (iov_iter_rw(iter) != READ || !iter_is_iovec(iter))
		return 0;
	/*
	 * Check to make sure we don't have duplicate iov_base's in this
	 * iovec, if so return EINVAL, otherwise we'll get csum errors
	 * when reading back.
	 */
	for (seg = 0; seg < iter->nr_segs; seg++) {
		for (i = seg + 1; i < iter->nr_segs; i++) {
			if (iter->iov[seg].iov_base == iter->iov[i].iov_base)
				goto out;
		}
	}
	retval = 0;
out:
	return retval;
}
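
/*
 * Worked example (editor's note): with a 4K sectorsize, blocksize_mask
 * is 0xfff.  An offset of 6144 (0x1800) fails the "offset &
 * blocksize_mask" check and DIO falls back to buffered IO, while 8192
 * passes.  iov_iter_alignment() applies the same mask to every iovec
 * base address and length.
 */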
static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_dio_data dio_data = { 0 };
	loff_t offset = iocb->ki_pos;
	size_t count = 0;
	int flags = 0;
	bool wakeup = true;
	bool relock = false;
	ssize_t ret;

	if (check_direct_IO(fs_info, iocb, iter, offset))
		return 0;

	inode_dio_begin(inode);
	smp_mb__after_atomic();

	/*
	 * The generic stuff only does filemap_write_and_wait_range, which
	 * isn't enough if we've written compressed pages to this area, so
	 * we need to flush the dirty pages again to make absolutely sure
	 * that any outstanding dirty pages are on disk.
	 */
	count = iov_iter_count(iter);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_fdatawrite_range(inode->i_mapping, offset,
					 offset + count - 1);

	if (iov_iter_rw(iter) == WRITE) {
		/*
		 * If the write DIO is beyond the EOF, we need update
		 * the isize, but it is protected by i_mutex. So we can
		 * not unlock the i_mutex at this case.
		 */
		if (offset + count <= inode->i_size) {
			dio_data.overwrite = 1;
			inode_unlock(inode);
			relock = true;
		}
		ret = btrfs_delalloc_reserve_space(inode, offset, count);
		if (ret)
			goto out;
		dio_data.outstanding_extents = count_max_extents(count);

		/*
		 * We need to know how many extents we reserved so that we can
		 * do the accounting properly if we go over the number we
		 * originally calculated.  Abuse current->journal_info for this.
		 */
		dio_data.reserve = round_up(count,
					    fs_info->sectorsize);
		dio_data.unsubmitted_oe_range_start = (u64)offset;
		dio_data.unsubmitted_oe_range_end = (u64)offset;
		current->journal_info = &dio_data;
		down_read(&BTRFS_I(inode)->dio_sem);
	} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
			    &BTRFS_I(inode)->runtime_flags)) {
		inode_dio_end(inode);
		flags = DIO_LOCKING | DIO_SKIP_HOLES;
		wakeup = false;
	}

	ret = __blockdev_direct_IO(iocb, inode,
				   fs_info->fs_devices->latest_bdev,
				   iter, btrfs_get_blocks_direct, NULL,
				   btrfs_submit_direct, flags);
	if (iov_iter_rw(iter) == WRITE) {
		up_read(&BTRFS_I(inode)->dio_sem);
		current->journal_info = NULL;
		if (ret < 0 && ret != -EIOCBQUEUED) {
			if (dio_data.reserve)
				btrfs_delalloc_release_space(inode, offset,
							     dio_data.reserve);
			/*
			 * On error we might have left some ordered extents
			 * without submitting corresponding bios for them, so
			 * cleanup them up to avoid other tasks getting them
			 * and waiting for them to complete forever.
			 */
			if (dio_data.unsubmitted_oe_range_start <
			    dio_data.unsubmitted_oe_range_end)
				btrfs_endio_direct_write_update_ordered(inode,
					dio_data.unsubmitted_oe_range_start,
					dio_data.unsubmitted_oe_range_end -
					dio_data.unsubmitted_oe_range_start,
					0);
		} else if (ret >= 0 && (size_t)ret < count)
			btrfs_delalloc_release_space(inode, offset,
						     count - (size_t)ret);
	}
out:
	if (wakeup)
		inode_dio_end(inode);
	if (relock)
		inode_lock(inode);

	return ret;
}

#define BTRFS_FIEMAP_FLAGS	(FIEMAP_FLAG_SYNC)
static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
			__u64 start, __u64 len)
{
	int	ret;

	ret = fiemap_check_flags(fieinfo, BTRFS_FIEMAP_FLAGS);
	if (ret)
		return ret;

	return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap);
}

int btrfs_readpage(struct file *file, struct page *page)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	return extent_read_full_page(tree, page, btrfs_get_extent, 0);
}

static int btrfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct extent_io_tree *tree;
	struct inode *inode = page->mapping->host;
	int ret;

	if (current->flags & PF_MEMALLOC) {
		redirty_page_for_writepage(wbc, page);
		unlock_page(page);
		return 0;
	}

	/*
	 * If we are under memory pressure we will call this directly from the
	 * VM, we need to make sure we have the inode referenced for the ordered
	 * extent.  If not just return like we didn't do anything.
	 */
	if (!igrab(inode)) {
		redirty_page_for_writepage(wbc, page);
		return AOP_WRITEPAGE_ACTIVATE;
	}
	tree = &BTRFS_I(page->mapping->host)->io_tree;
	ret = extent_write_full_page(tree, page, btrfs_get_extent, wbc);
	btrfs_add_delayed_iput(inode);
	return ret;
}

static int btrfs_writepages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct extent_io_tree *tree;

	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_writepages(tree, mapping, btrfs_get_extent, wbc);
}

static int
btrfs_readpages(struct file *file, struct address_space *mapping,
		struct list_head *pages, unsigned nr_pages)
{
	struct extent_io_tree *tree;
	tree = &BTRFS_I(mapping->host)->io_tree;
	return extent_readpages(tree, mapping, pages, nr_pages,
				btrfs_get_extent);
}
static int __btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *map;
	int ret;

	tree = &BTRFS_I(page->mapping->host)->io_tree;
	map = &BTRFS_I(page->mapping->host)->extent_tree;
	ret = try_release_extent_mapping(map, tree, page, gfp_flags);
	if (ret == 1) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
	return ret;
}

static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
{
	if (PageWriteback(page) || PageDirty(page))
		return 0;
	return __btrfs_releasepage(page, gfp_flags);
}

static void btrfs_invalidatepage(struct page *page, unsigned int offset,
				 unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct extent_io_tree *tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	u64 page_start = page_offset(page);
	u64 page_end = page_start + PAGE_SIZE - 1;
	u64 start;
	u64 end;
	int inode_evicting = inode->i_state & I_FREEING;

	/*
	 * we have the page locked, so new writeback can't start,
	 * and the dirty bit won't be cleared while we are here.
	 *
	 * Wait for IO on this page so that we can safely clear
	 * the PagePrivate2 bit and do ordered accounting
	 */
	wait_on_page_writeback(page);

	tree = &BTRFS_I(inode)->io_tree;
	if (offset) {
		btrfs_releasepage(page, GFP_NOFS);
		return;
	}

	if (!inode_evicting)
		lock_extent_bits(tree, page_start, page_end, &cached_state);

	start = page_start;
again:
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), start,
					page_end - start + 1);
	if (ordered) {
		end = min(page_end, ordered->file_offset + ordered->len - 1);
		/*
		 * IO on this page will never be started, so we need
		 * to account for any ordered extents now
		 */
		if (!inode_evicting)
			clear_extent_bit(tree, start, end,
					 EXTENT_DIRTY | EXTENT_DELALLOC |
					 EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
					 EXTENT_DEFRAG, 1, 0, &cached_state,
					 GFP_NOFS);
		/*
		 * whoever cleared the private bit is responsible
		 * for the finish_ordered_io
		 */
		if (TestClearPagePrivate2(page)) {
			struct btrfs_ordered_inode_tree *tree;
			u64 new_len;

			tree = &BTRFS_I(inode)->ordered_tree;

			spin_lock_irq(&tree->lock);
			set_bit(BTRFS_ORDERED_TRUNCATED, &ordered->flags);
			new_len = start - ordered->file_offset;
			if (new_len < ordered->truncated_len)
				ordered->truncated_len = new_len;
			spin_unlock_irq(&tree->lock);

			if (btrfs_dec_test_ordered_pending(inode, &ordered,
							   start,
							   end - start + 1, 1))
				btrfs_finish_ordered_io(ordered);
		}
		btrfs_put_ordered_extent(ordered);
		if (!inode_evicting) {
			cached_state = NULL;
			lock_extent_bits(tree, start, end,
					 &cached_state);
		}

		start = end + 1;
		if (start < page_end)
			goto again;
	}

	/*
	 * Qgroup reserved space handler
	 * Page here will be either
	 * 1) Already written to disk
	 *    In this case, its reserved space is released from data rsv map
	 *    and will be freed by delayed_ref handler finally.
	 *    So even we call qgroup_free_data(), it won't decrease reserved
	 *    space.
	 * 2) Not written to disk
	 *    This means the reserved space should be freed here. However,
	 *    if a truncate invalidates the page (by clearing PageDirty)
	 *    and the page is accounted for while allocating extent
	 *    in btrfs_check_data_free_space() we let delayed_ref to
	 *    free the entire extent.
	 */
	if (PageDirty(page))
		btrfs_qgroup_free_data(inode, page_start, PAGE_SIZE);
	if (!inode_evicting) {
		clear_extent_bit(tree, page_start, page_end,
				 EXTENT_LOCKED | EXTENT_DIRTY |
				 EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING |
				 EXTENT_DEFRAG, 1, 1,
				 &cached_state, GFP_NOFS);

		__btrfs_releasepage(page, GFP_NOFS);
	}

	ClearPageChecked(page);
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		set_page_private(page, 0);
		put_page(page);
	}
}
/*
 * btrfs_page_mkwrite() is not allowed to change the file size as it gets
 * called from a page fault handler when a page is first dirtied. Hence we must
 * be careful to check for EOF conditions here. We set the page up correctly
 * for a written page which means we get ENOSPC checking when writing into
 * holes and correct delalloc and unwritten extent mapping on filesystems that
 * support these features.
 *
 * We are not allowed to take the i_mutex here so we have to play games to
 * protect against truncate races as the page could now be beyond EOF.  Because
 * vmtruncate() writes the inode size before removing pages, once we have the
 * page lock we can determine safely if the page is beyond EOF. If it is not
 * beyond EOF, then the page is guaranteed safe against truncation until we
 * unlock the page.
 */
int btrfs_page_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vmf->vma->vm_file);
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_ordered_extent *ordered;
	struct extent_state *cached_state = NULL;
	char *kaddr;
	unsigned long zero_start;
	loff_t size;
	int ret;
	int reserved = 0;
	u64 reserved_space;
	u64 page_start;
	u64 page_end;
	u64 end;

	reserved_space = PAGE_SIZE;

	sb_start_pagefault(inode->i_sb);
	page_start = page_offset(page);
	page_end = page_start + PAGE_SIZE - 1;
	end = page_end;

	/*
	 * Reserving delalloc space after obtaining the page lock can lead to
	 * deadlock. For example, if a dirty page is locked by this function
	 * and the call to btrfs_delalloc_reserve_space() ends up triggering
	 * dirty page write out, then the btrfs_writepage() function could
	 * end up waiting indefinitely to get a lock on the page currently
	 * being processed by btrfs_page_mkwrite() function.
	 */
	ret = btrfs_delalloc_reserve_space(inode, page_start,
					   reserved_space);
	if (!ret) {
		ret = file_update_time(vmf->vma->vm_file);
		reserved = 1;
	}
	if (ret) {
		if (ret == -ENOMEM)
			ret = VM_FAULT_OOM;
		else /* -ENOSPC, -EIO, etc */
			ret = VM_FAULT_SIGBUS;
		if (reserved)
			goto out;
		goto out_noreserve;
	}

	ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
again:
	lock_page(page);
	size = i_size_read(inode);

	if ((page->mapping != inode->i_mapping) ||
	    (page_start >= size)) {
		/* page got truncated out from underneath us */
		goto out_unlock;
	}
	wait_on_page_writeback(page);

	lock_extent_bits(io_tree, page_start, page_end, &cached_state);
	set_page_extent_mapped(page);

	/*
	 * we can't set the delalloc bits if there are pending ordered
	 * extents.  Drop our locks and wait for them to finish
	 */
	ordered = btrfs_lookup_ordered_range(BTRFS_I(inode), page_start,
			PAGE_SIZE);
	if (ordered) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		unlock_page(page);
		btrfs_start_ordered_extent(inode, ordered, 1);
		btrfs_put_ordered_extent(ordered);
		goto again;
	}

	if (page->index == ((size - 1) >> PAGE_SHIFT)) {
		reserved_space = round_up(size - page_start,
					  fs_info->sectorsize);
		if (reserved_space < PAGE_SIZE) {
			end = page_start + reserved_space - 1;
			spin_lock(&BTRFS_I(inode)->lock);
			BTRFS_I(inode)->outstanding_extents++;
			spin_unlock(&BTRFS_I(inode)->lock);
			btrfs_delalloc_release_space(inode, page_start,
						PAGE_SIZE - reserved_space);
		}
	}

	/*
	 * page_mkwrite gets called when the page is firstly dirtied after it's
	 * faulted in, but write(2) could also dirty a page and set delalloc
	 * bits, thus in this case for space account reason, we still need to
	 * clear any delalloc bits within this page range since we have to
	 * reserve data&meta space before lock_page() (see above comments).
	 */
	clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, end,
			  EXTENT_DIRTY | EXTENT_DELALLOC |
			  EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
			  0, 0, &cached_state, GFP_NOFS);

	ret = btrfs_set_extent_delalloc(inode, page_start, end,
					&cached_state, 0);
	if (ret) {
		unlock_extent_cached(io_tree, page_start, page_end,
				     &cached_state, GFP_NOFS);
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}
	ret = 0;

	/* page is wholly or partially inside EOF */
	if (page_start + PAGE_SIZE > size)
		zero_start = size & ~PAGE_MASK;
	else
		zero_start = PAGE_SIZE;

	if (zero_start != PAGE_SIZE) {
		kaddr = kmap(page);
		memset(kaddr + zero_start, 0, PAGE_SIZE - zero_start);
		flush_dcache_page(page);
		kunmap(page);
	}
	ClearPageChecked(page);
	set_page_dirty(page);
	SetPageUptodate(page);

	BTRFS_I(inode)->last_trans = fs_info->generation;
	BTRFS_I(inode)->last_sub_trans = BTRFS_I(inode)->root->log_transid;
	BTRFS_I(inode)->last_log_commit = BTRFS_I(inode)->root->last_log_commit;

	unlock_extent_cached(io_tree, page_start, page_end, &cached_state, GFP_NOFS);

out_unlock:
	if (!ret) {
		sb_end_pagefault(inode->i_sb);
		return VM_FAULT_LOCKED;
	}
	unlock_page(page);
out:
	btrfs_delalloc_release_space(inode, page_start, reserved_space);
out_noreserve:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
static int btrfs_truncate(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_block_rsv *rsv;
	int ret = 0;
	int err = 0;
	struct btrfs_trans_handle *trans;
	u64 mask = fs_info->sectorsize - 1;
	u64 min_size = btrfs_calc_trunc_metadata_size(fs_info, 1);

	ret = btrfs_wait_ordered_range(inode, inode->i_size & (~mask),
				       (u64)-1);
	if (ret)
		return ret;

	/*
	 * Yes ladies and gentlemen, this is indeed ugly. The fact is we have
	 * 3 things going on here
	 *
	 * 1) We need to reserve space for our orphan item and the space to
	 * delete our orphan item. Lord knows we don't want to have a dangling
	 * orphan item because we didn't reserve space to remove it.
	 *
	 * 2) We need to reserve space to update our inode.
	 *
	 * 3) We need to have something to cache all the space that is going to
	 * be freed up by the truncate operation, but also have some slack
	 * space reserved in case it uses space during the truncate (thank you
	 * very much snapshotting).
	 *
	 * And we need these to all be separate. The fact is we can use a lot of
	 * space doing the truncate, and we have no earthly idea how much space
	 * we will use, so we need the truncate reservation to be separate so it
	 * doesn't end up using space reserved for updating the inode or
	 * removing the orphan item. We also need to be able to stop the
	 * transaction and start a new one, which means we need to be able to
	 * update the inode several times, and we have no way of knowing how
	 * many times that will be, so we can't just reserve 1 item for the
	 * entirety of the operation, so that has to be done separately as well.
	 * Then there is the orphan item, which does indeed need to be held on
	 * to for the whole operation, and we need nobody to touch this reserved
	 * space except the orphan code.
	 *
	 * So that leaves us with
	 *
	 * 1) root->orphan_block_rsv - for the orphan deletion.
	 * 2) rsv - for the truncate reservation, which we will steal from the
	 * transaction reservation.
	 * 3) fs_info->trans_block_rsv - this will have 1 item's worth left for
	 * updating the inode.
	 */
	rsv = btrfs_alloc_block_rsv(fs_info, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv)
		return -ENOMEM;
	rsv->size = min_size;
	rsv->failfast = 1;

	/*
	 * 1 for the truncate slack space
	 * 1 for updating the inode.
	 */
	trans = btrfs_start_transaction(root, 2);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out;
	}

	/* Migrate the slack space for the truncate to our reserve */
	ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv, rsv,
				      min_size, 0);
	BUG_ON(ret);

	/*
	 * So if we truncate and then write and fsync we normally would just
	 * write the extents that changed, which is a problem if we need to
	 * first truncate that entire inode. So set this flag so we write out
	 * all of the extents in the inode to the sync log so we're completely
	 * safe.
	 */
	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
	trans->block_rsv = rsv;

	while (1) {
		ret = btrfs_truncate_inode_items(trans, root, inode,
						 inode->i_size,
						 BTRFS_EXTENT_DATA_KEY);
		if (ret != -ENOSPC && ret != -EAGAIN) {
			err = ret;
			break;
		}

		trans->block_rsv = &fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);

		trans = btrfs_start_transaction(root, 2);
		if (IS_ERR(trans)) {
			ret = err = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		btrfs_block_rsv_release(fs_info, rsv, -1);
		ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
					      rsv, min_size, 0);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret == 0 && inode->i_nlink > 0) {
		trans->block_rsv = root->orphan_block_rsv;
		ret = btrfs_orphan_del(trans, BTRFS_I(inode));
		if (ret)
			err = ret;
	}

	if (trans) {
		trans->block_rsv = &fs_info->trans_block_rsv;
		ret = btrfs_update_inode(trans, root, inode);
		if (ret && !err)
			err = ret;

		ret = btrfs_end_transaction(trans);
		btrfs_btree_balance_dirty(fs_info);
	}
out:
	btrfs_free_block_rsv(fs_info, rsv);

	if (ret && !err)
		err = ret;

	return err;
}
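
/*
 * A rough worked example of the reservation above (a sketch, assuming the
 * usual helper definition of btrfs_calc_trunc_metadata_size() as
 * nodesize * BTRFS_MAX_LEVEL * num_items): with a 16K nodesize and
 * BTRFS_MAX_LEVEL of 8, min_size comes to 16K * 8 = 128K of metadata slack
 * reserved for each restarted truncate transaction.
 */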
/*
 * create a new subvolume directory/inode (helper for the ioctl).
 */
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
			     struct btrfs_root *new_root,
			     struct btrfs_root *parent_root,
			     u64 new_dirid)
{
	struct inode *inode;
	int err;
	u64 index = 0;

	inode = btrfs_new_inode(trans, new_root, NULL, "..", 2,
				new_dirid, new_dirid,
				S_IFDIR | (~current_umask() & S_IRWXUGO),
				&index);
	if (IS_ERR(inode))
		return PTR_ERR(inode);
	inode->i_op = &btrfs_dir_inode_operations;
	inode->i_fop = &btrfs_dir_file_operations;

	set_nlink(inode, 1);
	btrfs_i_size_write(BTRFS_I(inode), 0);
	unlock_new_inode(inode);

	err = btrfs_subvol_inherit_props(trans, new_root, parent_root);
	if (err)
		btrfs_err(new_root->fs_info,
			  "error inheriting subvolume %llu properties: %d",
			  new_root->root_key.objectid, err);

	err = btrfs_update_inode(trans, new_root, inode);

	iput(inode);
	return err;
}
struct inode *btrfs_alloc_inode(struct super_block *sb)
{
	struct btrfs_inode *ei;
	struct inode *inode;

	ei = kmem_cache_alloc(btrfs_inode_cachep, GFP_NOFS);
	if (!ei)
		return NULL;

	ei->root = NULL;
	ei->generation = 0;
	ei->last_trans = 0;
	ei->last_sub_trans = 0;
	ei->logged_trans = 0;
	ei->delalloc_bytes = 0;
	ei->defrag_bytes = 0;
	ei->disk_i_size = 0;
	ei->flags = 0;
	ei->csum_bytes = 0;
	ei->index_cnt = (u64)-1;
	ei->dir_index = 0;
	ei->last_unlink_trans = 0;
	ei->last_log_commit = 0;
	ei->delayed_iput_count = 0;

	spin_lock_init(&ei->lock);
	ei->outstanding_extents = 0;
	ei->reserved_extents = 0;

	ei->runtime_flags = 0;
	ei->force_compress = BTRFS_COMPRESS_NONE;

	ei->delayed_node = NULL;

	ei->i_otime.tv_sec = 0;
	ei->i_otime.tv_nsec = 0;

	inode = &ei->vfs_inode;
	extent_map_tree_init(&ei->extent_tree);
	extent_io_tree_init(&ei->io_tree, &inode->i_data);
	extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
	ei->io_tree.track_uptodate = 1;
	ei->io_failure_tree.track_uptodate = 1;
	atomic_set(&ei->sync_writers, 0);
	mutex_init(&ei->log_mutex);
	mutex_init(&ei->delalloc_mutex);
	btrfs_ordered_inode_tree_init(&ei->ordered_tree);
	INIT_LIST_HEAD(&ei->delalloc_inodes);
	INIT_LIST_HEAD(&ei->delayed_iput);
	RB_CLEAR_NODE(&ei->rb_node);
	init_rwsem(&ei->dio_sem);

	return inode;
}
#ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
void btrfs_test_destroy_inode(struct inode *inode)
{
	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
#endif
static void btrfs_i_callback(struct rcu_head *head)
{
	struct inode *inode = container_of(head, struct inode, i_rcu);

	kmem_cache_free(btrfs_inode_cachep, BTRFS_I(inode));
}
void btrfs_destroy_inode(struct inode *inode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_extent *ordered;
	struct btrfs_root *root = BTRFS_I(inode)->root;

	WARN_ON(!hlist_empty(&inode->i_dentry));
	WARN_ON(inode->i_data.nrpages);
	WARN_ON(BTRFS_I(inode)->outstanding_extents);
	WARN_ON(BTRFS_I(inode)->reserved_extents);
	WARN_ON(BTRFS_I(inode)->delalloc_bytes);
	WARN_ON(BTRFS_I(inode)->csum_bytes);
	WARN_ON(BTRFS_I(inode)->defrag_bytes);

	/*
	 * This can happen where we create an inode, but somebody else also
	 * created the same inode and we need to destroy the one we already
	 * created.
	 */
	if (!root)
		goto free;

	if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
		     &BTRFS_I(inode)->runtime_flags)) {
		btrfs_info(fs_info, "inode %llu still on the orphan list",
			   btrfs_ino(BTRFS_I(inode)));
		atomic_dec(&root->orphan_inodes);
	}

	while (1) {
		ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
		if (!ordered)
			break;
		btrfs_err(fs_info,
			  "found ordered extent %llu %llu on inode cleanup",
			  ordered->file_offset, ordered->len);
		btrfs_remove_ordered_extent(inode, ordered);
		btrfs_put_ordered_extent(ordered);
		btrfs_put_ordered_extent(ordered);
	}
	btrfs_qgroup_check_reserved_leak(inode);
	inode_tree_del(inode);
	btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
free:
	call_rcu(&inode->i_rcu, btrfs_i_callback);
}
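
/*
 * Note on the double btrfs_put_ordered_extent() above: the first put drops
 * the reference taken by btrfs_lookup_first_ordered_extent(), the second
 * drops the base reference the ordered extent has held since it was
 * allocated, so a leaked ordered extent is actually freed during teardown.
 */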
int btrfs_drop_inode(struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;

	if (root == NULL)
		return 1;

	/* the snap/subvol tree is on deleting */
	if (btrfs_root_refs(&root->root_item) == 0)
		return 1;
	else
		return generic_drop_inode(inode);
}
static void init_once(void *foo)
{
	struct btrfs_inode *ei = (struct btrfs_inode *) foo;

	inode_init_once(&ei->vfs_inode);
}
void btrfs_destroy_cachep(void)
{
	/*
	 * Make sure all delayed rcu free inodes are flushed before we
	 * destroy cache.
	 */
	rcu_barrier();
	kmem_cache_destroy(btrfs_inode_cachep);
	kmem_cache_destroy(btrfs_trans_handle_cachep);
	kmem_cache_destroy(btrfs_transaction_cachep);
	kmem_cache_destroy(btrfs_path_cachep);
	kmem_cache_destroy(btrfs_free_space_cachep);
}
int btrfs_init_cachep(void)
{
	btrfs_inode_cachep = kmem_cache_create("btrfs_inode",
			sizeof(struct btrfs_inode), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | SLAB_ACCOUNT,
			init_once);
	if (!btrfs_inode_cachep)
		goto fail;

	btrfs_trans_handle_cachep = kmem_cache_create("btrfs_trans_handle",
			sizeof(struct btrfs_trans_handle), 0,
			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_trans_handle_cachep)
		goto fail;

	btrfs_transaction_cachep = kmem_cache_create("btrfs_transaction",
			sizeof(struct btrfs_transaction), 0,
			SLAB_TEMPORARY | SLAB_MEM_SPREAD, NULL);
	if (!btrfs_transaction_cachep)
		goto fail;

	btrfs_path_cachep = kmem_cache_create("btrfs_path",
			sizeof(struct btrfs_path), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_path_cachep)
		goto fail;

	btrfs_free_space_cachep = kmem_cache_create("btrfs_free_space",
			sizeof(struct btrfs_free_space), 0,
			SLAB_MEM_SPREAD, NULL);
	if (!btrfs_free_space_cachep)
		goto fail;

	return 0;
fail:
	btrfs_destroy_cachep();
	return -ENOMEM;
}
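
/*
 * Sketch of how these slab caches pair with module init/exit (an
 * illustration only; "example_init"/"example_exit" are hypothetical
 * stand-ins for the real callers in super.c). Kept under "#if 0" so it is
 * never built.
 */
#if 0
static int __init example_init(void)
{
	int err;

	/* creates every cache; cleans up after itself on failure */
	err = btrfs_init_cachep();
	if (err)
		return err;
	return 0;
}

static void __exit example_exit(void)
{
	/* the rcu_barrier() inside makes delayed inode frees finish first */
	btrfs_destroy_cachep();
}
#endif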
static int btrfs_getattr(const struct path *path, struct kstat *stat,
			 u32 request_mask, unsigned int flags)
{
	u64 delalloc_bytes;
	struct inode *inode = d_inode(path->dentry);
	u32 blocksize = inode->i_sb->s_blocksize;

	generic_fillattr(inode, stat);
	stat->dev = BTRFS_I(inode)->root->anon_dev;

	spin_lock(&BTRFS_I(inode)->lock);
	delalloc_bytes = BTRFS_I(inode)->delalloc_bytes;
	spin_unlock(&BTRFS_I(inode)->lock);
	stat->blocks = (ALIGN(inode_get_bytes(inode), blocksize) +
			ALIGN(delalloc_bytes, blocksize)) >> 9;
	return 0;
}
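
/*
 * Worked example for the stat->blocks math above: st_blocks is counted in
 * 512-byte units, hence the >> 9. With a 4K blocksize, 8K of bytes already
 * on disk plus 4K of outstanding delalloc gives
 * (ALIGN(8K, 4K) + ALIGN(4K, 4K)) >> 9 = 12K >> 9 = 24 blocks, so stat()
 * also reflects space that writeback has not yet allocated.
 */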
static int btrfs_rename_exchange(struct inode *old_dir,
				 struct dentry *old_dentry,
				 struct inode *new_dir,
				 struct dentry *new_dentry)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(old_dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(old_dir)->root;
	struct btrfs_root *dest = BTRFS_I(new_dir)->root;
	struct inode *new_inode = new_dentry->d_inode;
	struct inode *old_inode = old_dentry->d_inode;
	struct timespec ctime = current_time(old_inode);
	struct dentry *parent;
	u64 old_ino = btrfs_ino(BTRFS_I(old_inode));
	u64 new_ino = btrfs_ino(BTRFS_I(new_inode));
	u64 old_idx = 0;
	u64 new_idx = 0;
	u64 root_objectid;
	int ret;
	bool root_log_pinned = false;
	bool dest_log_pinned = false;

	/* we only allow rename subvolume link between subvolumes */
	if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
		return -EXDEV;

	/* close the race window with snapshot create/destroy ioctl */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		down_read(&fs_info->subvol_sem);

	/*
	 * We want to reserve the absolute worst case amount of items. So if
	 * both inodes are subvols and we need to unlink them then that would
	 * require 4 item modifications, but if they are both normal inodes it
	 * would require 5 item modifications, so we'll assume they're normal
	 * inodes. So 5 * 2 is 10, plus 2 for the new links, so 12 total items
	 * should cover the worst case number of items we'll modify.
	 */
	trans = btrfs_start_transaction(root, 12);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto out_notrans;
	}

	/*
	 * We need to find a free sequence number both in the source and
	 * in the destination directory for the exchange.
	 */
	ret = btrfs_set_inode_index(BTRFS_I(new_dir), &old_idx);
	if (ret)
		goto out_fail;
	ret = btrfs_set_inode_index(BTRFS_I(old_dir), &new_idx);
	if (ret)
		goto out_fail;

	BTRFS_I(old_inode)->dir_index = 0ULL;
	BTRFS_I(new_inode)->dir_index = 0ULL;

	/* Reference for the source. */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(fs_info, trans);
	} else {
		btrfs_pin_log_trans(root);
		root_log_pinned = true;
		ret = btrfs_insert_inode_ref(trans, dest,
					     new_dentry->d_name.name,
					     new_dentry->d_name.len,
					     old_ino,
					     btrfs_ino(BTRFS_I(new_dir)),
					     old_idx);
		if (ret)
			goto out_fail;
	}

	/* And now for the dest. */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		/* force full log commit if subvolume involved. */
		btrfs_set_log_full_commit(fs_info, trans);
	} else {
		btrfs_pin_log_trans(dest);
		dest_log_pinned = true;
		ret = btrfs_insert_inode_ref(trans, root,
					     old_dentry->d_name.name,
					     old_dentry->d_name.len,
					     new_ino,
					     btrfs_ino(BTRFS_I(old_dir)),
					     new_idx);
		if (ret)
			goto out_fail;
	}

	/* Update inode version and ctime/mtime. */
	inode_inc_iversion(old_dir);
	inode_inc_iversion(new_dir);
	inode_inc_iversion(old_inode);
	inode_inc_iversion(new_inode);
	old_dir->i_ctime = old_dir->i_mtime = ctime;
	new_dir->i_ctime = new_dir->i_mtime = ctime;
	old_inode->i_ctime = ctime;
	new_inode->i_ctime = ctime;

	if (old_dentry->d_parent != new_dentry->d_parent) {
		btrfs_record_unlink_dir(trans, BTRFS_I(old_dir),
					BTRFS_I(old_inode), 1);
		btrfs_record_unlink_dir(trans, BTRFS_I(new_dir),
					BTRFS_I(new_inode), 1);
	}

	/* src is a subvolume */
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID) {
		root_objectid = BTRFS_I(old_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, root, old_dir,
					  root_objectid,
					  old_dentry->d_name.name,
					  old_dentry->d_name.len);
	} else { /* src is an inode */
		ret = __btrfs_unlink_inode(trans, root, BTRFS_I(old_dir),
					   BTRFS_I(old_dentry->d_inode),
					   old_dentry->d_name.name,
					   old_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, root, old_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	/* dest is a subvolume */
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID) {
		root_objectid = BTRFS_I(new_inode)->root->root_key.objectid;
		ret = btrfs_unlink_subvol(trans, dest, new_dir,
					  root_objectid,
					  new_dentry->d_name.name,
					  new_dentry->d_name.len);
	} else { /* dest is an inode */
		ret = __btrfs_unlink_inode(trans, dest, BTRFS_I(new_dir),
					   BTRFS_I(new_dentry->d_inode),
					   new_dentry->d_name.name,
					   new_dentry->d_name.len);
		if (!ret)
			ret = btrfs_update_inode(trans, dest, new_inode);
	}
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(new_dir), BTRFS_I(old_inode),
			     new_dentry->d_name.name,
			     new_dentry->d_name.len, 0, old_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	ret = btrfs_add_link(trans, BTRFS_I(old_dir), BTRFS_I(new_inode),
			     old_dentry->d_name.name,
			     old_dentry->d_name.len, 0, new_idx);
	if (ret) {
		btrfs_abort_transaction(trans, ret);
		goto out_fail;
	}

	if (old_inode->i_nlink == 1)
		BTRFS_I(old_inode)->dir_index = old_idx;
	if (new_inode->i_nlink == 1)
		BTRFS_I(new_inode)->dir_index = new_idx;

	if (root_log_pinned) {
		parent = new_dentry->d_parent;
		btrfs_log_new_name(trans, BTRFS_I(old_inode), BTRFS_I(old_dir),
				   parent);
		btrfs_end_log_trans(root);
		root_log_pinned = false;
	}
	if (dest_log_pinned) {
		parent = old_dentry->d_parent;
		btrfs_log_new_name(trans, BTRFS_I(new_inode), BTRFS_I(new_dir),
				   parent);
		btrfs_end_log_trans(dest);
		dest_log_pinned = false;
	}
out_fail:
	/*
	 * If we have pinned a log and an error happened, we unpin tasks
	 * trying to sync the log and force them to fallback to a transaction
	 * commit if the log currently contains any of the inodes involved in
	 * this rename operation (to ensure we do not persist a log with an
	 * inconsistent state for any of these inodes or leading to any
	 * inconsistencies when replayed). If the transaction was aborted, the
	 * abortion reason is propagated to userspace when attempting to commit
	 * the transaction. If the log does not contain any of these inodes, we
	 * allow the tasks to sync it.
	 */
	if (ret && (root_log_pinned || dest_log_pinned)) {
		if (btrfs_inode_in_log(BTRFS_I(old_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(new_dir), fs_info->generation) ||
		    btrfs_inode_in_log(BTRFS_I(old_inode), fs_info->generation) ||
		    (new_inode &&
		     btrfs_inode_in_log(BTRFS_I(new_inode), fs_info->generation)))
			btrfs_set_log_full_commit(fs_info, trans);

		if (root_log_pinned) {
			btrfs_end_log_trans(root);
			root_log_pinned = false;
		}
		if (dest_log_pinned) {
			btrfs_end_log_trans(dest);
			dest_log_pinned = false;
		}
	}
	ret = btrfs_end_transaction(trans);
out_notrans:
	if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);
	if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
		up_read(&fs_info->subvol_sem);

	return ret;
}
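
/*
 * Note: the exchange path reserves 12 items (5 * 2 for unlinking both
 * inodes plus 2 for the new links) while the plain rename path below
 * reserves 11 (only one new link); both take subvol_sem when a subvolume
 * is involved so a concurrent snapshot create/destroy cannot race with
 * the rename.
 */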
static int btrfs_whiteout_for_rename(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     struct inode *dir,
				     struct dentry *dentry)
{
	int ret;
	struct inode *inode;
	u64 objectid;
	u64 index;

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		return ret;

	inode = btrfs_new_inode(trans, root, dir,
				dentry->d_name.name,
				dentry->d_name.len,
				btrfs_ino(BTRFS_I(dir)),
				objectid,
				S_IFCHR | WHITEOUT_MODE,
				&index);

	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		return ret;
	}

	inode->i_op = &btrfs_special_inode_operations;
	init_special_inode(inode, inode->i_mode,
			   WHITEOUT_DEV);

	ret = btrfs_init_inode_security(trans, inode, dir,
					&dentry->d_name);
	if (ret)
		goto out;

	ret = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
			       BTRFS_I(inode), 0, index);
	if (ret)
		goto out;

	ret = btrfs_update_inode(trans, root, inode);
out:
	unlock_new_inode(inode);
	if (ret)
		inode_dec_link_count(inode);
	iput(inode);

	return ret;
}
*old_dir
, struct dentry
*old_dentry
,
9723 struct inode
*new_dir
, struct dentry
*new_dentry
,
9726 struct btrfs_fs_info
*fs_info
= btrfs_sb(old_dir
->i_sb
);
9727 struct btrfs_trans_handle
*trans
;
9728 unsigned int trans_num_items
;
9729 struct btrfs_root
*root
= BTRFS_I(old_dir
)->root
;
9730 struct btrfs_root
*dest
= BTRFS_I(new_dir
)->root
;
9731 struct inode
*new_inode
= d_inode(new_dentry
);
9732 struct inode
*old_inode
= d_inode(old_dentry
);
9736 u64 old_ino
= btrfs_ino(BTRFS_I(old_inode
));
9737 bool log_pinned
= false;
9739 if (btrfs_ino(BTRFS_I(new_dir
)) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID
)
9742 /* we only allow rename subvolume link between subvolumes */
9743 if (old_ino
!= BTRFS_FIRST_FREE_OBJECTID
&& root
!= dest
)
9746 if (old_ino
== BTRFS_EMPTY_SUBVOL_DIR_OBJECTID
||
9747 (new_inode
&& btrfs_ino(BTRFS_I(new_inode
)) == BTRFS_FIRST_FREE_OBJECTID
))
9750 if (S_ISDIR(old_inode
->i_mode
) && new_inode
&&
9751 new_inode
->i_size
> BTRFS_EMPTY_DIR_SIZE
)
9755 /* check for collisions, even if the name isn't there */
9756 ret
= btrfs_check_dir_item_collision(dest
, new_dir
->i_ino
,
9757 new_dentry
->d_name
.name
,
9758 new_dentry
->d_name
.len
);
9761 if (ret
== -EEXIST
) {
9763 * eexist without a new_inode */
9764 if (WARN_ON(!new_inode
)) {
9768 /* maybe -EOVERFLOW */
9775 * we're using rename to replace one file with another. Start IO on it
9776 * now so we don't add too much work to the end of the transaction
9778 if (new_inode
&& S_ISREG(old_inode
->i_mode
) && new_inode
->i_size
)
9779 filemap_flush(old_inode
->i_mapping
);
9781 /* close the racy window with snapshot create/destroy ioctl */
9782 if (old_ino
== BTRFS_FIRST_FREE_OBJECTID
)
9783 down_read(&fs_info
->subvol_sem
);
9785 * We want to reserve the absolute worst case amount of items. So if
9786 * both inodes are subvols and we need to unlink them then that would
9787 * require 4 item modifications, but if they are both normal inodes it
9788 * would require 5 item modifications, so we'll assume they are normal
9789 * inodes. So 5 * 2 is 10, plus 1 for the new link, so 11 total items
9790 * should cover the worst case number of items we'll modify.
9791 * If our rename has the whiteout flag, we need more 5 units for the
9792 * new inode (1 inode item, 1 inode ref, 2 dir items and 1 xattr item
9793 * when selinux is enabled).
9795 trans_num_items
= 11;
9796 if (flags
& RENAME_WHITEOUT
)
9797 trans_num_items
+= 5;
9798 trans
= btrfs_start_transaction(root
, trans_num_items
);
9799 if (IS_ERR(trans
)) {
9800 ret
= PTR_ERR(trans
);
9805 btrfs_record_root_in_trans(trans
, dest
);
9807 ret
= btrfs_set_inode_index(BTRFS_I(new_dir
), &index
);
9811 BTRFS_I(old_inode
)->dir_index
= 0ULL;
9812 if (unlikely(old_ino
== BTRFS_FIRST_FREE_OBJECTID
)) {
9813 /* force full log commit if subvolume involved. */
9814 btrfs_set_log_full_commit(fs_info
, trans
);
9816 btrfs_pin_log_trans(root
);
9818 ret
= btrfs_insert_inode_ref(trans
, dest
,
9819 new_dentry
->d_name
.name
,
9820 new_dentry
->d_name
.len
,
9822 btrfs_ino(BTRFS_I(new_dir
)), index
);
9827 inode_inc_iversion(old_dir
);
9828 inode_inc_iversion(new_dir
);
9829 inode_inc_iversion(old_inode
);
9830 old_dir
->i_ctime
= old_dir
->i_mtime
=
9831 new_dir
->i_ctime
= new_dir
->i_mtime
=
9832 old_inode
->i_ctime
= current_time(old_dir
);
9834 if (old_dentry
->d_parent
!= new_dentry
->d_parent
)
9835 btrfs_record_unlink_dir(trans
, BTRFS_I(old_dir
),
9836 BTRFS_I(old_inode
), 1);
9838 if (unlikely(old_ino
== BTRFS_FIRST_FREE_OBJECTID
)) {
9839 root_objectid
= BTRFS_I(old_inode
)->root
->root_key
.objectid
;
9840 ret
= btrfs_unlink_subvol(trans
, root
, old_dir
, root_objectid
,
9841 old_dentry
->d_name
.name
,
9842 old_dentry
->d_name
.len
);
9844 ret
= __btrfs_unlink_inode(trans
, root
, BTRFS_I(old_dir
),
9845 BTRFS_I(d_inode(old_dentry
)),
9846 old_dentry
->d_name
.name
,
9847 old_dentry
->d_name
.len
);
9849 ret
= btrfs_update_inode(trans
, root
, old_inode
);
9852 btrfs_abort_transaction(trans
, ret
);
9857 inode_inc_iversion(new_inode
);
9858 new_inode
->i_ctime
= current_time(new_inode
);
9859 if (unlikely(btrfs_ino(BTRFS_I(new_inode
)) ==
9860 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID
)) {
9861 root_objectid
= BTRFS_I(new_inode
)->location
.objectid
;
9862 ret
= btrfs_unlink_subvol(trans
, dest
, new_dir
,
9864 new_dentry
->d_name
.name
,
9865 new_dentry
->d_name
.len
);
9866 BUG_ON(new_inode
->i_nlink
== 0);
9868 ret
= btrfs_unlink_inode(trans
, dest
, BTRFS_I(new_dir
),
9869 BTRFS_I(d_inode(new_dentry
)),
9870 new_dentry
->d_name
.name
,
9871 new_dentry
->d_name
.len
);
9873 if (!ret
&& new_inode
->i_nlink
== 0)
9874 ret
= btrfs_orphan_add(trans
,
9875 BTRFS_I(d_inode(new_dentry
)));
9877 btrfs_abort_transaction(trans
, ret
);
9882 ret
= btrfs_add_link(trans
, BTRFS_I(new_dir
), BTRFS_I(old_inode
),
9883 new_dentry
->d_name
.name
,
9884 new_dentry
->d_name
.len
, 0, index
);
9886 btrfs_abort_transaction(trans
, ret
);
9890 if (old_inode
->i_nlink
== 1)
9891 BTRFS_I(old_inode
)->dir_index
= index
;
9894 struct dentry
*parent
= new_dentry
->d_parent
;
9896 btrfs_log_new_name(trans
, BTRFS_I(old_inode
), BTRFS_I(old_dir
),
9898 btrfs_end_log_trans(root
);
9902 if (flags
& RENAME_WHITEOUT
) {
9903 ret
= btrfs_whiteout_for_rename(trans
, root
, old_dir
,
9907 btrfs_abort_transaction(trans
, ret
);
9913 * If we have pinned the log and an error happened, we unpin tasks
9914 * trying to sync the log and force them to fallback to a transaction
9915 * commit if the log currently contains any of the inodes involved in
9916 * this rename operation (to ensure we do not persist a log with an
9917 * inconsistent state for any of these inodes or leading to any
9918 * inconsistencies when replayed). If the transaction was aborted, the
9919 * abortion reason is propagated to userspace when attempting to commit
9920 * the transaction. If the log does not contain any of these inodes, we
9921 * allow the tasks to sync it.
9923 if (ret
&& log_pinned
) {
9924 if (btrfs_inode_in_log(BTRFS_I(old_dir
), fs_info
->generation
) ||
9925 btrfs_inode_in_log(BTRFS_I(new_dir
), fs_info
->generation
) ||
9926 btrfs_inode_in_log(BTRFS_I(old_inode
), fs_info
->generation
) ||
9928 btrfs_inode_in_log(BTRFS_I(new_inode
), fs_info
->generation
)))
9929 btrfs_set_log_full_commit(fs_info
, trans
);
9931 btrfs_end_log_trans(root
);
9934 btrfs_end_transaction(trans
);
9936 if (old_ino
== BTRFS_FIRST_FREE_OBJECTID
)
9937 up_read(&fs_info
->subvol_sem
);
static int btrfs_rename2(struct inode *old_dir, struct dentry *old_dentry,
			 struct inode *new_dir, struct dentry *new_dentry,
			 unsigned int flags)
{
	if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
		return -EINVAL;

	if (flags & RENAME_EXCHANGE)
		return btrfs_rename_exchange(old_dir, old_dentry, new_dir,
					     new_dentry);

	return btrfs_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
}
static void btrfs_run_delalloc_work(struct btrfs_work *work)
{
	struct btrfs_delalloc_work *delalloc_work;
	struct inode *inode;

	delalloc_work = container_of(work, struct btrfs_delalloc_work,
				     work);
	inode = delalloc_work->inode;
	filemap_flush(inode->i_mapping);
	if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
		     &BTRFS_I(inode)->runtime_flags))
		filemap_flush(inode->i_mapping);

	if (delalloc_work->delay_iput)
		btrfs_add_delayed_iput(inode);
	else
		iput(inode);
	complete(&delalloc_work->completion);
}
struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
						      int delay_iput)
{
	struct btrfs_delalloc_work *work;

	work = kmalloc(sizeof(*work), GFP_NOFS);
	if (!work)
		return NULL;

	init_completion(&work->completion);
	INIT_LIST_HEAD(&work->list);
	work->inode = inode;
	work->delay_iput = delay_iput;
	WARN_ON_ONCE(!inode);
	btrfs_init_work(&work->work, btrfs_flush_delalloc_helper,
			btrfs_run_delalloc_work, NULL, NULL);

	return work;
}
void btrfs_wait_and_free_delalloc_work(struct btrfs_delalloc_work *work)
{
	wait_for_completion(&work->completion);
	kfree(work);
}
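
/*
 * Typical life cycle of a delalloc work item, as used by
 * __start_delalloc_inodes() below: btrfs_alloc_delalloc_work() ->
 * btrfs_queue_work() on fs_info->flush_workers -> the worker runs
 * btrfs_run_delalloc_work() and signals ->completion ->
 * btrfs_wait_and_free_delalloc_work() waits and then frees the item.
 */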
/*
 * some fairly slow code that needs optimization. This walks the list
 * of all the inodes with pending delalloc and forces them to disk.
 */
static int __start_delalloc_inodes(struct btrfs_root *root, int delay_iput,
				   int nr)
{
	struct btrfs_inode *binode;
	struct inode *inode;
	struct btrfs_delalloc_work *work, *next;
	struct list_head works;
	struct list_head splice;
	int ret = 0;

	INIT_LIST_HEAD(&works);
	INIT_LIST_HEAD(&splice);

	mutex_lock(&root->delalloc_mutex);
	spin_lock(&root->delalloc_lock);
	list_splice_init(&root->delalloc_inodes, &splice);
	while (!list_empty(&splice)) {
		binode = list_entry(splice.next, struct btrfs_inode,
				    delalloc_inodes);

		list_move_tail(&binode->delalloc_inodes,
			       &root->delalloc_inodes);
		inode = igrab(&binode->vfs_inode);
		if (!inode) {
			cond_resched_lock(&root->delalloc_lock);
			continue;
		}
		spin_unlock(&root->delalloc_lock);

		work = btrfs_alloc_delalloc_work(inode, delay_iput);
		if (!work) {
			if (delay_iput)
				btrfs_add_delayed_iput(inode);
			else
				iput(inode);
			ret = -ENOMEM;
			goto out;
		}
		list_add_tail(&work->list, &works);
		btrfs_queue_work(root->fs_info->flush_workers,
				 &work->work);
		ret++;
		if (nr != -1 && ret >= nr)
			goto out;
		cond_resched();
		spin_lock(&root->delalloc_lock);
	}
	spin_unlock(&root->delalloc_lock);

out:
	list_for_each_entry_safe(work, next, &works, list) {
		list_del_init(&work->list);
		btrfs_wait_and_free_delalloc_work(work);
	}

	if (!list_empty_careful(&splice)) {
		spin_lock(&root->delalloc_lock);
		list_splice_tail(&splice, &root->delalloc_inodes);
		spin_unlock(&root->delalloc_lock);
	}
	mutex_unlock(&root->delalloc_mutex);
	return ret;
}
int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	ret = __start_delalloc_inodes(root, delay_iput, -1);
	if (ret > 0)
		ret = 0;
	/*
	 * the filemap_flush will queue IO into the worker threads, but
	 * we have to make sure the IO is actually started and that
	 * ordered extents get created before we return
	 */
	atomic_inc(&fs_info->async_submit_draining);
	while (atomic_read(&fs_info->nr_async_submits) ||
	       atomic_read(&fs_info->async_delalloc_pages)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0 &&
			    atomic_read(&fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&fs_info->async_submit_draining);
	return ret;
}
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
			       int nr)
{
	struct btrfs_root *root;
	struct list_head splice;
	int ret;

	if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
		return -EROFS;

	INIT_LIST_HEAD(&splice);

	mutex_lock(&fs_info->delalloc_root_mutex);
	spin_lock(&fs_info->delalloc_root_lock);
	list_splice_init(&fs_info->delalloc_roots, &splice);
	while (!list_empty(&splice) && nr) {
		root = list_first_entry(&splice, struct btrfs_root,
					delalloc_root);
		root = btrfs_grab_fs_root(root);
		BUG_ON(!root);
		list_move_tail(&root->delalloc_root,
			       &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);

		ret = __start_delalloc_inodes(root, delay_iput, nr);
		btrfs_put_fs_root(root);
		if (ret < 0)
			goto out;

		if (nr != -1) {
			nr -= ret;
			WARN_ON(nr < 0);
		}
		spin_lock(&fs_info->delalloc_root_lock);
	}
	spin_unlock(&fs_info->delalloc_root_lock);

	ret = 0;
	atomic_inc(&fs_info->async_submit_draining);
	while (atomic_read(&fs_info->nr_async_submits) ||
	       atomic_read(&fs_info->async_delalloc_pages)) {
		wait_event(fs_info->async_submit_wait,
			   (atomic_read(&fs_info->nr_async_submits) == 0 &&
			    atomic_read(&fs_info->async_delalloc_pages) == 0));
	}
	atomic_dec(&fs_info->async_submit_draining);
out:
	if (!list_empty_careful(&splice)) {
		spin_lock(&fs_info->delalloc_root_lock);
		list_splice_tail(&splice, &fs_info->delalloc_roots);
		spin_unlock(&fs_info->delalloc_root_lock);
	}
	mutex_unlock(&fs_info->delalloc_root_mutex);
	return ret;
}
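
/*
 * Note: nr == -1 means "no limit" here, mirroring the nr check in
 * __start_delalloc_inodes(); btrfs_start_delalloc_inodes() above passes -1
 * to flush every delalloc inode of a single root.
 */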
static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
			 const char *symname)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct btrfs_path *path;
	struct btrfs_key key;
	struct inode *inode = NULL;
	int err;
	int drop_inode = 0;
	u64 objectid;
	u64 index = 0;
	int name_len;
	int datasize;
	unsigned long ptr;
	struct btrfs_file_extent_item *ei;
	struct extent_buffer *leaf;

	name_len = strlen(symname);
	if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(fs_info))
		return -ENAMETOOLONG;

	/*
	 * 2 items for inode item and ref
	 * 2 items for dir items
	 * 1 item for updating parent inode item
	 * 1 item for the inline extent item
	 * 1 item for xattr if selinux is on
	 */
	trans = btrfs_start_transaction(root, 7);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	err = btrfs_find_free_ino(root, &objectid);
	if (err)
		goto out_unlock;

	inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name,
				dentry->d_name.len, btrfs_ino(BTRFS_I(dir)),
				objectid, S_IFLNK|S_IRWXUGO, &index);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		goto out_unlock;
	}

	/*
	 * If the active LSM wants to access the inode during
	 * d_instantiate it needs these. Smack checks to see
	 * if the filesystem supports xattrs by looking at the
	 * ops vector.
	 */
	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;
	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	err = btrfs_init_inode_security(trans, inode, dir, &dentry->d_name);
	if (err)
		goto out_unlock_inode;

	path = btrfs_alloc_path();
	if (!path) {
		err = -ENOMEM;
		goto out_unlock_inode;
	}
	key.objectid = btrfs_ino(BTRFS_I(inode));
	key.offset = 0;
	key.type = BTRFS_EXTENT_DATA_KEY;
	datasize = btrfs_file_extent_calc_inline_size(name_len);
	err = btrfs_insert_empty_item(trans, root, path, &key,
				      datasize);
	if (err) {
		btrfs_free_path(path);
		goto out_unlock_inode;
	}
	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	btrfs_set_file_extent_generation(leaf, ei, trans->transid);
	btrfs_set_file_extent_type(leaf, ei,
				   BTRFS_FILE_EXTENT_INLINE);
	btrfs_set_file_extent_encryption(leaf, ei, 0);
	btrfs_set_file_extent_compression(leaf, ei, 0);
	btrfs_set_file_extent_other_encoding(leaf, ei, 0);
	btrfs_set_file_extent_ram_bytes(leaf, ei, name_len);

	ptr = btrfs_file_extent_inline_start(ei);
	write_extent_buffer(leaf, symname, ptr, name_len);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_free_path(path);

	inode->i_op = &btrfs_symlink_inode_operations;
	inode_nohighmem(inode);
	inode->i_mapping->a_ops = &btrfs_symlink_aops;
	inode_set_bytes(inode, name_len);
	btrfs_i_size_write(BTRFS_I(inode), name_len);
	err = btrfs_update_inode(trans, root, inode);
	/*
	 * Last step, add directory indexes for our symlink inode. This is the
	 * last step to avoid extra cleanup of these indexes if an error
	 * happens afterwards.
	 */
	if (!err)
		err = btrfs_add_nondir(trans, BTRFS_I(dir), dentry,
				       BTRFS_I(inode), 0, index);
	if (err) {
		drop_inode = 1;
		goto out_unlock_inode;
	}

	unlock_new_inode(inode);
	d_instantiate(dentry, inode);

out_unlock:
	btrfs_end_transaction(trans);
	if (drop_inode) {
		inode_dec_link_count(inode);
		iput(inode);
	}
	btrfs_btree_balance_dirty(fs_info);
	return err;

out_unlock_inode:
	drop_inode = 1;
	unlock_new_inode(inode);
	goto out_unlock;
}
static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
				       u64 start, u64 num_bytes, u64 min_size,
				       loff_t actual_len, u64 *alloc_hint,
				       struct btrfs_trans_handle *trans)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct extent_map *em;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct btrfs_key ins;
	u64 cur_offset = start;
	u64 i_size;
	u64 cur_bytes;
	u64 last_alloc = (u64)-1;
	int ret = 0;
	bool own_trans = true;
	u64 end = start + num_bytes - 1;

	if (trans)
		own_trans = false;
	while (num_bytes > 0) {
		if (own_trans) {
			trans = btrfs_start_transaction(root, 3);
			if (IS_ERR(trans)) {
				ret = PTR_ERR(trans);
				break;
			}
		}

		cur_bytes = min_t(u64, num_bytes, SZ_256M);
		cur_bytes = max(cur_bytes, min_size);
		/*
		 * If we are severely fragmented we could end up with really
		 * small allocations, so if the allocator is returning small
		 * chunks lets make its job easier by only searching for those
		 * sized chunks.
		 */
		cur_bytes = min(cur_bytes, last_alloc);
		ret = btrfs_reserve_extent(root, cur_bytes, cur_bytes,
					   min_size, 0, *alloc_hint, &ins, 1, 0);
		if (ret) {
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}
		btrfs_dec_block_group_reservations(fs_info, ins.objectid);

		last_alloc = ins.offset;
		ret = insert_reserved_file_extent(trans, inode,
						  cur_offset, ins.objectid,
						  ins.offset, ins.offset,
						  ins.offset, 0, 0, 0,
						  BTRFS_FILE_EXTENT_PREALLOC);
		if (ret) {
			btrfs_free_reserved_extent(fs_info, ins.objectid,
						   ins.offset, 0);
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
					cur_offset + ins.offset -1, 0);

		em = alloc_extent_map();
		if (!em) {
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
			goto next;
		}

		em->start = cur_offset;
		em->orig_start = cur_offset;
		em->len = ins.offset;
		em->block_start = ins.objectid;
		em->block_len = ins.offset;
		em->orig_block_len = ins.offset;
		em->ram_bytes = ins.offset;
		em->bdev = fs_info->fs_devices->latest_bdev;
		set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		em->generation = trans->transid;

		while (1) {
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, em, 1);
			write_unlock(&em_tree->lock);
			if (ret != -EEXIST)
				break;
			btrfs_drop_extent_cache(BTRFS_I(inode), cur_offset,
						cur_offset + ins.offset - 1,
						0);
		}
		free_extent_map(em);
next:
		num_bytes -= ins.offset;
		cur_offset += ins.offset;
		*alloc_hint = ins.objectid + ins.offset;

		inode_inc_iversion(inode);
		inode->i_ctime = current_time(inode);

		BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
		if (!(mode & FALLOC_FL_KEEP_SIZE) &&
		    (actual_len > inode->i_size) &&
		    (cur_offset > inode->i_size)) {
			if (cur_offset > actual_len)
				i_size = actual_len;
			else
				i_size = cur_offset;
			i_size_write(inode, i_size);
			btrfs_ordered_update_i_size(inode, i_size, NULL);
		}

		ret = btrfs_update_inode(trans, root, inode);

		if (ret) {
			btrfs_abort_transaction(trans, ret);
			if (own_trans)
				btrfs_end_transaction(trans);
			break;
		}

		if (own_trans)
			btrfs_end_transaction(trans);
	}
	if (cur_offset < end)
		btrfs_free_reserved_data_space(inode, cur_offset,
					       end - cur_offset + 1);
	return ret;
}
int btrfs_prealloc_file_range(struct inode *inode, int mode,
			      u64 start, u64 num_bytes, u64 min_size,
			      loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   NULL);
}

int btrfs_prealloc_file_range_trans(struct inode *inode,
				    struct btrfs_trans_handle *trans, int mode,
				    u64 start, u64 num_bytes, u64 min_size,
				    loff_t actual_len, u64 *alloc_hint)
{
	return __btrfs_prealloc_file_range(inode, mode, start, num_bytes,
					   min_size, actual_len, alloc_hint,
					   trans);
}
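
/*
 * Sketch of a caller of btrfs_prealloc_file_range() (an illustration only;
 * "example_prealloc" and the 1M sizes are hypothetical, the real caller is
 * the fallocate path). Kept under "#if 0" so it is never built.
 */
#if 0
static int example_prealloc(struct inode *inode)
{
	u64 alloc_hint = 0;

	/* preallocate 1M at offset 0 without moving i_size */
	return btrfs_prealloc_file_range(inode, FALLOC_FL_KEEP_SIZE,
					 0, SZ_1M, SZ_1M, 0, &alloc_hint);
}
#endif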
static int btrfs_set_page_dirty(struct page *page)
{
	return __set_page_dirty_nobuffers(page);
}
static int btrfs_permission(struct inode *inode, int mask)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	umode_t mode = inode->i_mode;

	if (mask & MAY_WRITE &&
	    (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
		if (btrfs_root_readonly(root))
			return -EROFS;
		if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
			return -EACCES;
	}
	return generic_permission(inode, mask);
}
static int btrfs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
	struct btrfs_trans_handle *trans;
	struct btrfs_root *root = BTRFS_I(dir)->root;
	struct inode *inode = NULL;
	u64 objectid;
	u64 index;
	int ret = 0;

	/*
	 * 5 units required for adding orphan entry
	 */
	trans = btrfs_start_transaction(root, 5);
	if (IS_ERR(trans))
		return PTR_ERR(trans);

	ret = btrfs_find_free_ino(root, &objectid);
	if (ret)
		goto out;

	inode = btrfs_new_inode(trans, root, dir, NULL, 0,
				btrfs_ino(BTRFS_I(dir)), objectid, mode, &index);
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		inode = NULL;
		goto out;
	}

	inode->i_fop = &btrfs_file_operations;
	inode->i_op = &btrfs_file_inode_operations;

	inode->i_mapping->a_ops = &btrfs_aops;
	BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;

	ret = btrfs_init_inode_security(trans, inode, dir, NULL);
	if (ret)
		goto out_inode;

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		goto out_inode;
	ret = btrfs_orphan_add(trans, BTRFS_I(inode));
	if (ret)
		goto out_inode;

	/*
	 * We set number of links to 0 in btrfs_new_inode(), and here we set
	 * it to 1 because d_tmpfile() will issue a warning if the count is 0,
	 * through:
	 *
	 *    d_tmpfile() -> inode_dec_link_count() -> drop_nlink()
	 */
	set_nlink(inode, 1);
	unlock_new_inode(inode);
	d_tmpfile(dentry, inode);
	mark_inode_dirty(inode);

out:
	btrfs_end_transaction(trans);
	if (ret)
		iput(inode);
	btrfs_balance_delayed_items(fs_info);
	btrfs_btree_balance_dirty(fs_info);
	return ret;

out_inode:
	unlock_new_inode(inode);
	goto out;
}
__attribute__((const))
static int btrfs_readpage_io_failed_hook(struct page *page, int failed_mirror)
{
	return -EAGAIN;
}
static const struct inode_operations btrfs_dir_inode_operations = {
	.getattr	= btrfs_getattr,
	.lookup		= btrfs_lookup,
	.create		= btrfs_create,
	.unlink		= btrfs_unlink,
	.link		= btrfs_link,
	.mkdir		= btrfs_mkdir,
	.rmdir		= btrfs_rmdir,
	.rename		= btrfs_rename2,
	.symlink	= btrfs_symlink,
	.setattr	= btrfs_setattr,
	.mknod		= btrfs_mknod,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
	.tmpfile	= btrfs_tmpfile,
};
static const struct inode_operations btrfs_dir_ro_inode_operations = {
	.lookup		= btrfs_lookup,
	.permission	= btrfs_permission,
	.update_time	= btrfs_update_time,
};
= {
10555 .llseek
= generic_file_llseek
,
10556 .read
= generic_read_dir
,
10557 .iterate_shared
= btrfs_real_readdir
,
10558 .unlocked_ioctl
= btrfs_ioctl
,
10559 #ifdef CONFIG_COMPAT
10560 .compat_ioctl
= btrfs_compat_ioctl
,
10562 .release
= btrfs_release_file
,
10563 .fsync
= btrfs_sync_file
,
10566 static const struct extent_io_ops btrfs_extent_io_ops
= {
10567 /* mandatory callbacks */
10568 .submit_bio_hook
= btrfs_submit_bio_hook
,
10569 .readpage_end_io_hook
= btrfs_readpage_end_io_hook
,
10570 .merge_bio_hook
= btrfs_merge_bio_hook
,
10571 .readpage_io_failed_hook
= btrfs_readpage_io_failed_hook
,
10573 /* optional callbacks */
10574 .fill_delalloc
= run_delalloc_range
,
10575 .writepage_end_io_hook
= btrfs_writepage_end_io_hook
,
10576 .writepage_start_hook
= btrfs_writepage_start_hook
,
10577 .set_bit_hook
= btrfs_set_bit_hook
,
10578 .clear_bit_hook
= btrfs_clear_bit_hook
,
10579 .merge_extent_hook
= btrfs_merge_extent_hook
,
10580 .split_extent_hook
= btrfs_split_extent_hook
,
/*
 * btrfs doesn't support the bmap operation because swapfiles
 * use bmap to make a mapping of extents in the file. They assume
 * these extents won't change over the life of the file and they
 * use the bmap result to do IO directly to the drive.
 *
 * the btrfs bmap call would return logical addresses that aren't
 * suitable for IO and they also will change frequently as COW
 * operations happen. So, swapfile + btrfs == corruption.
 *
 * For now we're avoiding this by dropping bmap.
 */
static const struct address_space_operations btrfs_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.writepages	= btrfs_writepages,
	.readpages	= btrfs_readpages,
	.direct_IO	= btrfs_direct_IO,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
	.set_page_dirty	= btrfs_set_page_dirty,
	.error_remove_page = generic_error_remove_page,
};
static const struct address_space_operations btrfs_symlink_aops = {
	.readpage	= btrfs_readpage,
	.writepage	= btrfs_writepage,
	.invalidatepage = btrfs_invalidatepage,
	.releasepage	= btrfs_releasepage,
};
static const struct inode_operations btrfs_file_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.listxattr	= btrfs_listxattr,
	.permission	= btrfs_permission,
	.fiemap		= btrfs_fiemap,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_special_inode_operations = {
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.get_acl	= btrfs_get_acl,
	.set_acl	= btrfs_set_acl,
	.update_time	= btrfs_update_time,
};
static const struct inode_operations btrfs_symlink_inode_operations = {
	.get_link	= page_get_link,
	.getattr	= btrfs_getattr,
	.setattr	= btrfs_setattr,
	.permission	= btrfs_permission,
	.listxattr	= btrfs_listxattr,
	.update_time	= btrfs_update_time,
};
const struct dentry_operations btrfs_dentry_operations = {
	.d_delete	= btrfs_dentry_delete,
	.d_release	= btrfs_dentry_release,
};