/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
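
/*
 * The two limits below bound how many checksums fit in a single csum tree
 * item and how much file data a single btrfs_ordered_sum allocation may
 * describe.  As a rough, hedged example (assuming 4K pages, a 4K sectorsize
 * and 4-byte crc32c checksums, which are common defaults but not guaranteed
 * here): MAX_ORDERED_SUM_BYTES comes out to just under 1024 * 4K, i.e.
 * roughly 4MB of data per ordered sum, less the btrfs_ordered_sum header.
 */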
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_CACHE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (r)->sectorsize)
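
/*
 * Insert a file extent item (BTRFS_EXTENT_DATA_KEY) for @objectid at file
 * offset @pos, describing a regular extent stored at @disk_offset.
 */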
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

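/*
 * Find the csum item covering @bytenr in the csum tree and return a pointer
 * to the checksum slot for that block.  Returns ERR_PTR(-EFBIG) if an item
 * was found but @bytenr lands exactly at the end of the blocks it covers,
 * and ERR_PTR(-ENOENT) if no covering item exists.
 */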
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

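/*
 * Position @path at the file extent item for (@objectid, @offset).
 * @mod < 0 sets up the path for a deletion and any nonzero @mod requests
 * a COW search.
 */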
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_DATA_KEY);
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

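/*
 * Look up the data checksums for every block in @bio, checking sums still
 * queued on ordered extents before falling back to the csum tree.  Results
 * are written to @dst when it is provided, otherwise into the btrfs_io_bio's
 * inline or allocated csum array.  Direct I/O callers pass @dio and supply
 * the file offset in @logical_offset instead of using page offsets.
 */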
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u32 diff;
	int nblocks;
	int bio_index = 0;
	int count;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
							    GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
		path->reada = 2;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + bvec->bv_len - 1,
						EXTENT_NODATASUM, GFP_NOFS);
				} else {
					btrfs_info(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",
						   btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
		bio_index += count;
		while (count--) {
			disk_bytenr += bvec->bv_len;
			offset += bvec->bv_len;
			bvec++;
		}
	}
	btrfs_free_path(path);
	return 0;
}

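/* Read-path wrapper: look up checksums for a buffered read bio. */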
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

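/*
 * Direct I/O wrapper: the arithmetic below appears to place this bio's
 * checksums at the matching block offset within dip->csum, based on the
 * bio's position relative to dip->disk_bytenr.
 */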
int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct btrfs_dio_private *dip, struct bio *bio,
			      u64 offset)
{
	int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int ret;

	len >>= inode->i_sb->s_blocksize_bits;
	len *= csum_size;

	ret = __btrfs_lookup_bio_sums(root, inode, bio, offset,
				      (u32 *)(dip->csum + len), 1);
	return ret;
}

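/*
 * Collect every checksum for the byte range [start, end] from the csum tree
 * into btrfs_ordered_sum structures appended to @list.  When @search_commit
 * is set the commit root is read without taking locks.
 */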
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(start == ALIGN(start, root->sectorsize) &&
	       (end + 1) == ALIGN(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = 2;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_first_entry(&tmplist, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

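/*
 * Compute the data checksums for every block in @bio and attach them to the
 * matching ordered extents.  With @contig set, @file_start gives the file
 * offset of the bio; otherwise offsets come from the bio's pages.
 */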
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	int index;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered); /* Logic error */
	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (offset >= ordered->file_offset + ordered->len ||
		    offset < ordered->file_offset) {
			unsigned long bytes_left;
			sums->len = this_sum_bytes;
			this_sum_bytes = 0;
			btrfs_add_ordered_sum(inode, ordered, sums);
			btrfs_put_ordered_extent(ordered);

			bytes_left = bio->bi_iter.bi_size - total_bytes;

			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
				       GFP_NOFS);
			BUG_ON(!sums); /* -ENOMEM */
			sums->len = bytes_left;
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
				       total_bytes;
			index = 0;
		}

		data = kmap_atomic(bvec->bv_page);
		sums->sums[index] = ~(u32)0;
		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
						    sums->sums[index],
						    bvec->bv_len);
		kunmap_atomic(data);
		btrfs_csum_final(sums->sums[index],
				 (char *)(sums->sums + index));

		bio_index++;
		index++;
		total_bytes += bvec->bv_len;
		this_sum_bytes += bvec->bv_len;
		offset += bvec->bv_len;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(root, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

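/*
 * Write the checksums described by @sums into the csum tree, reusing and
 * extending an existing csum item when possible and inserting new items
 * otherwise, looping until every block of @sums has been covered.
 */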
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	btrfs_set_key_type(&file_key, BTRFS_EXTENT_CSUM_KEY);

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (btrfs_key_type(&found_key) != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;
fail_unlock:
	goto out;
}

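/*
 * Fill in an extent_map from an on-disk file extent item, mirroring the
 * values btrfs_get_extent() would produce for regular, prealloc and inline
 * extents.
 */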
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}