/* fs/btrfs/file-item.c */

/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
                                   sizeof(struct btrfs_item) * 2) / \
                                   size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
                                       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
                                   sizeof(struct btrfs_ordered_sum)) / \
                                   sizeof(u32) * (fs_info)->sectorsize)

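/*
 * For example, with the default 16KiB nodesize, 4KiB sectors and 4-byte
 * crc32c checksums, MAX_CSUM_ITEMS works out to roughly 4000 checksums per
 * csum item, so a single item can describe on the order of 16MiB of data.
 * MAX_ORDERED_SUM_BYTES likewise caps how much data one btrfs_ordered_sum
 * may describe so that its checksum array still fits in a page-sized
 * allocation.
 */
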
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 objectid, u64 pos,
                             u64 disk_offset, u64 disk_num_bytes,
                             u64 num_bytes, u64 offset, u64 ram_bytes,
                             u8 compression, u8 encryption, u16 other_encoding)
{
        int ret = 0;
        struct btrfs_file_extent_item *item;
        struct btrfs_key file_key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        file_key.objectid = objectid;
        file_key.offset = pos;
        file_key.type = BTRFS_EXTENT_DATA_KEY;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      sizeof(*item));
        if (ret < 0)
                goto out;
        BUG_ON(ret); /* Can't happen */
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0],
                              struct btrfs_file_extent_item);
        btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
        btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, item, offset);
        btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
        btrfs_set_file_extent_generation(leaf, item, trans->transid);
        btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
        btrfs_set_file_extent_compression(leaf, item, compression);
        btrfs_set_file_extent_encryption(leaf, item, encryption);
        btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}

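/*
 * Find the checksum item covering @bytenr in the csum tree and return a
 * pointer to the checksum slot for @bytenr inside that item.  Returns
 * ERR_PTR(-EFBIG) if the item that should cover @bytenr ends exactly at it
 * (the caller may be able to extend it), ERR_PTR(-ENOENT) if no item covers
 * @bytenr, and other search errors are passed through.  With @cow set the
 * path is COWed so the caller can modify the item in place.
 */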
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct btrfs_path *path,
                  u64 bytenr, int cow)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_csum_item *item;
        struct extent_buffer *leaf;
        u64 csum_offset = 0;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int csums_in_item;

        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
        if (ret < 0)
                goto fail;
        leaf = path->nodes[0];
        if (ret > 0) {
                ret = 1;
                if (path->slots[0] == 0)
                        goto fail;
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
                        goto fail;

                csum_offset = (bytenr - found_key.offset) >>
                                fs_info->sb->s_blocksize_bits;
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;

                if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
                } else if (csum_offset > csums_in_item) {
                        goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
        return item;
fail:
        if (ret > 0)
                ret = -ENOENT;
        return ERR_PTR(ret);
}

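/*
 * Position @path at the EXTENT_DATA item for (@objectid, @offset).  A
 * negative @mod searches with room reserved for a deletion, and any
 * non-zero @mod makes the search COW the path so the item can be modified.
 */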
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid,
                             u64 offset, int mod)
{
        int ret;
        struct btrfs_key file_key;
        int ins_len = mod < 0 ? -1 : 0;
        int cow = mod != 0;

        file_key.objectid = objectid;
        file_key.offset = offset;
        file_key.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
        return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
        kfree(bio->csum_allocated);
}

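/*
 * Look up the data checksums for every sector of @bio.  The checksums are
 * copied either into @dst, when the caller supplied a buffer, or into the
 * btrfs_io_bio (inline when they fit, otherwise into a kmalloc'ed array
 * freed by btrfs_io_bio_endio_readpage).  In-memory ordered sums are
 * consulted first, then the csum tree.  Sectors without a checksum get a
 * zeroed entry; for the data reloc tree the range is additionally flagged
 * EXTENT_NODATASUM.
 */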
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                            u64 logical_offset, u32 *dst, int dio)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
        struct btrfs_csum_item *item = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        u8 *csum;
        u64 offset = 0;
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
        u64 page_bytes_left;
        u32 diff;
        int nblocks;
        int count = 0;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return BLK_STS_RESOURCE;

        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc_array(nblocks,
                                        csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
                                return BLK_STS_RESOURCE;
                        }
                        btrfs_bio->csum = btrfs_bio->csum_allocated;
                        btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
                } else {
                        btrfs_bio->csum = btrfs_bio->csum_inline;
                }
                csum = btrfs_bio->csum;
        } else {
                csum = (u8 *)dst;
        }

        if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
                path->reada = READA_FORWARD;

        /*
         * the free space stuff is only read when it hasn't been
         * updated in the current transaction.  So, we can safely
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
        if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;

        bio_for_each_segment(bvec, bio, iter) {
                page_bytes_left = bvec.bv_len;
                if (count)
                        goto next;

                if (!dio)
                        offset = page_offset(bvec.bv_page) + bvec.bv_offset;
                count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
                                               (u32 *)csum, nblocks);
                if (count)
                        goto found;

                if (!item || disk_bytenr < item_start_offset ||
                    disk_bytenr >= item_last_offset) {
                        struct btrfs_key found_key;
                        u32 item_size;

                        if (item)
                                btrfs_release_path(path);
                        item = btrfs_lookup_csum(NULL, fs_info->csum_root,
                                                 path, disk_bytenr, 0);
                        if (IS_ERR(item)) {
                                count = 1;
                                memset(csum, 0, csum_size);
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
                                                offset + fs_info->sectorsize - 1,
                                                EXTENT_NODATASUM);
                                } else {
                                        btrfs_info_rl(fs_info,
                                                "no csum found for inode %llu start %llu",
                                                btrfs_ino(BTRFS_I(inode)), offset);
                                }
                                item = NULL;
                                btrfs_release_path(path);
                                goto found;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);

                        item_start_offset = found_key.offset;
                        item_size = btrfs_item_size_nr(path->nodes[0],
                                                       path->slots[0]);
                        item_last_offset = item_start_offset +
                                (item_size / csum_size) *
                                fs_info->sectorsize;
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_csum_item);
                }
                /*
                 * this byte range must be able to fit inside
                 * a single leaf so it will also fit inside a u32
                 */
                diff = disk_bytenr - item_start_offset;
                diff = diff / fs_info->sectorsize;
                diff = diff * csum_size;
                count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
                                            inode->i_sb->s_blocksize_bits);
                read_extent_buffer(path->nodes[0], csum,
                                   ((unsigned long)item) + diff,
                                   csum_size * count);
found:
                csum += count * csum_size;
                nblocks -= count;
next:
                while (count--) {
                        disk_bytenr += fs_info->sectorsize;
                        offset += fs_info->sectorsize;
                        page_bytes_left -= fs_info->sectorsize;
                        if (!page_bytes_left)
                                break; /* move to next bio */
                }
        }

        WARN_ON_ONCE(count);
        btrfs_free_path(path);
        return 0;
}

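/*
 * The two entry points below cover the buffered read path, where the file
 * offsets come from the pages in the bio, and the direct IO path, where the
 * caller passes the logical file offset explicitly.
 */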
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
        return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
        return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

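/*
 * Collect all checksums covering the range [start, end] from the csum tree
 * into a list of btrfs_ordered_sum structures.  @start and @end + 1 must be
 * sectorsize aligned.  With @search_commit set the commit root is searched
 * without taking tree locks.  On success the new entries are spliced onto
 * @list; on error everything allocated here is freed again.
 */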
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_ordered_sum *sums;
        struct btrfs_csum_item *item;
        LIST_HEAD(tmplist);
        unsigned long offset;
        int ret;
        size_t size;
        u64 csum_end;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(end + 1, fs_info->sectorsize));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (search_commit) {
                path->skip_locking = 1;
                path->reada = READA_FORWARD;
                path->search_commit_root = 1;
        }

        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key.offset = start;
        key.type = BTRFS_EXTENT_CSUM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto fail;
        if (ret > 0 && path->slots[0] > 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
                    key.type == BTRFS_EXTENT_CSUM_KEY) {
                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        if (offset * csum_size <
                            btrfs_item_size_nr(leaf, path->slots[0] - 1))
                                path->slots[0]--;
                }
        }

        while (start <= end) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto fail;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY ||
                    key.offset > end)
                        break;

                if (key.offset > start)
                        start = key.offset;

                size = btrfs_item_size_nr(leaf, path->slots[0]);
                csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
                if (csum_end <= start) {
                        path->slots[0]++;
                        continue;
                }

                csum_end = min(csum_end, end + 1);
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_csum_item);
                while (start < csum_end) {
                        size = min_t(size_t, csum_end - start,
                                     MAX_ORDERED_SUM_BYTES(fs_info));
                        sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
                                       GFP_NOFS);
                        if (!sums) {
                                ret = -ENOMEM;
                                goto fail;
                        }

                        sums->bytenr = start;
                        sums->len = (int)size;

                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        offset *= csum_size;
                        size >>= fs_info->sb->s_blocksize_bits;

                        read_extent_buffer(path->nodes[0],
                                           sums->sums,
                                           ((unsigned long)item) + offset,
                                           csum_size * size);

                        start += fs_info->sectorsize * size;
                        list_add_tail(&sums->list, &tmplist);
                }
                path->slots[0]++;
        }
        ret = 0;
fail:
        while (ret < 0 && !list_empty(&tmplist)) {
                sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        list_splice_tail(&tmplist, list);

        btrfs_free_path(path);
        return ret;
}

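/*
 * Compute data checksums for every sector of a write @bio and attach them,
 * as btrfs_ordered_sum structures, to the ordered extent(s) covering the
 * bio.  With @contig set, @file_start is the file offset of the first byte;
 * otherwise the offsets are taken from the bio's pages.  A new sum structure
 * is started whenever the bio crosses into a different ordered extent.
 */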
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                                u64 file_start, int contig)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_sum *sums;
        struct btrfs_ordered_extent *ordered = NULL;
        char *data;
        struct bvec_iter iter;
        struct bio_vec bvec;
        int index;
        int nr_sectors;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        int i;
        u64 offset;

        sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
                       GFP_NOFS);
        if (!sums)
                return BLK_STS_RESOURCE;

        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);

        if (contig)
                offset = file_start;
        else
                offset = 0; /* shut up gcc */

        sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;

        bio_for_each_segment(bvec, bio, iter) {
                if (!contig)
                        offset = page_offset(bvec.bv_page) + bvec.bv_offset;

                if (!ordered) {
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
                }

                data = kmap_atomic(bvec.bv_page);

                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
                                                 bvec.bv_len + fs_info->sectorsize
                                                 - 1);

                for (i = 0; i < nr_sectors; i++) {
                        if (offset >= ordered->file_offset + ordered->len ||
                            offset < ordered->file_offset) {
                                unsigned long bytes_left;

                                kunmap_atomic(data);
                                sums->len = this_sum_bytes;
                                this_sum_bytes = 0;
                                btrfs_add_ordered_sum(inode, ordered, sums);
                                btrfs_put_ordered_extent(ordered);

                                bytes_left = bio->bi_iter.bi_size - total_bytes;

                                sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
                                               GFP_NOFS);
                                BUG_ON(!sums); /* -ENOMEM */
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
                                                                      offset);
                                ASSERT(ordered); /* Logic error */
                                sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
                                        + total_bytes;
                                index = 0;

                                data = kmap_atomic(bvec.bv_page);
                        }

                        sums->sums[index] = ~(u32)0;
                        sums->sums[index]
                                = btrfs_csum_data(data + bvec.bv_offset
                                                + (i * fs_info->sectorsize),
                                                sums->sums[index],
                                                fs_info->sectorsize);
                        btrfs_csum_final(sums->sums[index],
                                        (char *)(sums->sums + index));
                        index++;
                        offset += fs_info->sectorsize;
                        this_sum_bytes += fs_info->sectorsize;
                        total_bytes += fs_info->sectorsize;
                }

                kunmap_atomic(data);
        }
        this_sum_bytes = 0;
        btrfs_add_ordered_sum(inode, ordered, sums);
        btrfs_put_ordered_extent(ordered);
        return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
                                       struct btrfs_path *path,
                                       struct btrfs_key *key,
                                       u64 bytenr, u64 len)
{
        struct extent_buffer *leaf;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u64 csum_end;
        u64 end_byte = bytenr + len;
        u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

        leaf = path->nodes[0];
        csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
        csum_end <<= fs_info->sb->s_blocksize_bits;
        csum_end += key->offset;

        if (key->offset < bytenr && csum_end <= end_byte) {
                /*
                 *         [ bytenr - len ]
                 *         [   ]
                 *   [csum     ]
                 *   A simple truncate off the end of the item
                 */
                u32 new_size = (bytenr - key->offset) >> blocksize_bits;
                new_size *= csum_size;
                btrfs_truncate_item(fs_info, path, new_size, 1);
        } else if (key->offset >= bytenr && csum_end > end_byte &&
                   end_byte > key->offset) {
                /*
                 *         [ bytenr - len ]
                 *                 [ ]
                 *                 [csum     ]
                 * we need to truncate from the beginning of the csum
                 */
                u32 new_size = (csum_end - end_byte) >> blocksize_bits;
                new_size *= csum_size;

                btrfs_truncate_item(fs_info, path, new_size, 0);

                key->offset = end_byte;
                btrfs_set_item_key_safe(fs_info, path, key);
        } else {
                BUG();
        }
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
        struct btrfs_root *root = fs_info->csum_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int blocksize_bits = fs_info->sb->s_blocksize_bits;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
                key.offset = end_byte - 1;
                key.type = BTRFS_EXTENT_CSUM_KEY;

                path->leave_spinning = 1;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                } else if (ret < 0) {
                        break;
                }

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY) {
                        break;
                }

                if (key.offset >= end_byte)
                        break;

                csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
                csum_end <<= blocksize_bits;
                csum_end += key.offset;

                /* this csum ends before we start, we're done */
                if (csum_end <= bytenr)
                        break;

                /* delete the entire item, it is inside our range */
                if (key.offset >= bytenr && csum_end <= end_byte) {
                        int del_nr = 1;

                        /*
                         * Check how many csum items preceding this one in this
                         * leaf correspond to our range and then delete them all
                         * at once.
                         */
                        if (key.offset > bytenr && path->slots[0] > 0) {
                                int slot = path->slots[0] - 1;

                                while (slot >= 0) {
                                        struct btrfs_key pk;

                                        btrfs_item_key_to_cpu(leaf, &pk, slot);
                                        if (pk.offset < bytenr ||
                                            pk.type != BTRFS_EXTENT_CSUM_KEY ||
                                            pk.objectid !=
                                            BTRFS_EXTENT_CSUM_OBJECTID)
                                                break;
                                        path->slots[0] = slot;
                                        del_nr++;
                                        key.offset = pk.offset;
                                        slot--;
                                }
                        }
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
                                goto out;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
                        unsigned long offset;
                        unsigned long shift_len;
                        unsigned long item_offset;
                        /*
                         *        [ bytenr - len ]
                         *     [csum              ]
                         *
                         * Our bytes are in the middle of the csum,
                         * we need to split this item and insert a new one.
                         *
                         * But we can't drop the path because the
                         * csum could change, get removed, extended etc.
                         *
                         * The trick here is the max size of a csum item leaves
                         * enough room in the tree block for a single
                         * item header.  So, we split the item in place,
                         * adding a new header pointing to the existing
                         * bytes.  Then we loop around again and we have
                         * a nicely formed csum item that we can neatly
                         * truncate.
                         */
                        offset = (bytenr - key.offset) >> blocksize_bits;
                        offset *= csum_size;

                        shift_len = (len >> blocksize_bits) * csum_size;

                        item_offset = btrfs_item_ptr_offset(leaf,
                                                            path->slots[0]);

                        memzero_extent_buffer(leaf, item_offset + offset,
                                              shift_len);
                        key.offset = bytenr;

                        /*
                         * btrfs_split_item returns -EAGAIN when the
                         * item changed size or key
                         */
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }

                        key.offset = end_byte - 1;
                } else {
                        truncate_one_csum(fs_info, path, &key, bytenr, len);
                        if (key.offset < bytenr)
                                break;
                }
                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

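/*
 * Insert the checksums described by @sums into the csum tree.  Existing
 * csum items are reused where possible: an item that already covers part of
 * the range is written into directly, an item ending right before
 * @sums->bytenr is extended when the leaf has room, and otherwise a new
 * item is inserted.  The loop repeats until every byte described by @sums
 * has its checksum on disk.
 */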
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        struct btrfs_csum_item *item;
        struct btrfs_csum_item *item_end;
        struct extent_buffer *leaf = NULL;
        u64 next_offset;
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
        u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        next_offset = (u64)-1;
        found_next = 0;
        bytenr = sums->bytenr + total_bytes;
        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;

        item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
        if (!IS_ERR(item)) {
                ret = 0;
                leaf = path->nodes[0];
                item_end = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_csum_item);
                item_end = (struct btrfs_csum_item *)((char *)item_end +
                           btrfs_item_size_nr(leaf, path->slots[0]));
                goto found;
        }
        ret = PTR_ERR(item);
        if (ret != -EFBIG && ret != -ENOENT)
                goto fail_unlock;

        if (ret == -EFBIG) {
                u32 item_size;
                /* we found one, but it isn't big enough yet */
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if ((item_size / csum_size) >=
                    MAX_CSUM_ITEMS(fs_info, csum_size)) {
                        /* already at max size, make a new one */
                        goto insert;
                }
        } else {
                int slot = path->slots[0] + 1;
                /* we didn't find a csum item, insert one */
                nritems = btrfs_header_nritems(path->nodes[0]);
                if (!nritems || (path->slots[0] >= nritems - 1)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 1)
                                found_next = 1;
                        if (ret != 0)
                                goto insert;
                        slot = path->slots[0];
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
                if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
                        found_next = 1;
                        goto insert;
                }
                next_offset = found_key.offset;
                found_next = 1;
                goto insert;
        }

        /*
         * at this point, we know the tree has an item, but it isn't big
         * enough yet to put our csum in.  Grow it
         */
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
        if (ret < 0)
                goto fail_unlock;

        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto insert;
                path->slots[0]--;
        }

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        csum_offset = (bytenr - found_key.offset) >>
                        fs_info->sb->s_blocksize_bits;

        if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
            found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
            csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
                goto insert;
        }

        if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
            csum_size) {
                int extend_nr;
                u64 tmp;
                u32 diff;
                u32 free_space;

                if (btrfs_leaf_free_space(fs_info, leaf) <
                    sizeof(struct btrfs_item) + csum_size * 2)
                        goto insert;

                free_space = btrfs_leaf_free_space(fs_info, leaf) -
                             sizeof(struct btrfs_item) - csum_size;
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                WARN_ON(tmp < 1);

                extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

                diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
                diff = min(free_space, diff);
                diff /= csum_size;
                diff *= csum_size;

                btrfs_extend_item(fs_info, path, diff);
                ret = 0;
                goto csum;
        }

insert:
        btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
                u64 tmp;

                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                tmp = min(tmp, (next_offset - file_key.offset) >>
                               fs_info->sb->s_blocksize_bits);

                tmp = max_t(u64, 1, tmp);
                tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
                ins_size = csum_size * tmp;
        } else {
                ins_size = csum_size;
        }
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      ins_size);
        path->leave_spinning = 0;
        if (ret < 0)
                goto fail_unlock;
        if (WARN_ON(ret != 0))
                goto fail_unlock;
        leaf = path->nodes[0];
csum:
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item_end = (struct btrfs_csum_item *)((unsigned char *)item +
                   btrfs_item_size_nr(leaf, path->slots[0]));
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
found:
        ins_size = (u32)(sums->len - total_bytes) >>
                   fs_info->sb->s_blocksize_bits;
        ins_size *= csum_size;
        ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
                         ins_size);
        write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
                            ins_size);

        ins_size /= csum_size;
        total_bytes += ins_size * fs_info->sectorsize;
        index += ins_size;

        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
                btrfs_release_path(path);
                cond_resched();
                goto again;
        }
out:
        btrfs_free_path(path);
        return ret;

fail_unlock:
        goto out;
}

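/*
 * Fill in an extent_map from an on-disk file extent item.  Regular and
 * preallocated extents take their block range from the item (a zero
 * disk_bytenr maps to a hole), inline extents are mapped to
 * EXTENT_MAP_INLINE, and any other type is logged via btrfs_err().
 */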
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
                                     const struct btrfs_path *path,
                                     struct btrfs_file_extent_item *fi,
                                     const bool new_inline,
                                     struct extent_map *em)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf = path->nodes[0];
        const int slot = path->slots[0];
        struct btrfs_key key;
        u64 extent_start, extent_end;
        u64 bytenr;
        u8 type = btrfs_file_extent_type(leaf, fi);
        int compress_type = btrfs_file_extent_compression(leaf, fi);

        em->bdev = fs_info->fs_devices->latest_bdev;
        btrfs_item_key_to_cpu(leaf, &key, slot);
        extent_start = key.offset;

        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                extent_end = extent_start +
                        btrfs_file_extent_num_bytes(leaf, fi);
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_inline_len(leaf, slot, fi);
                extent_end = ALIGN(extent_start + size,
                                   fs_info->sectorsize);
        }

        em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                em->start = extent_start;
                em->len = extent_end - extent_start;
                em->orig_start = extent_start -
                        btrfs_file_extent_offset(leaf, fi);
                em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                if (bytenr == 0) {
                        em->block_start = EXTENT_MAP_HOLE;
                        return;
                }
                if (compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                        em->block_start = bytenr;
                        em->block_len = em->orig_block_len;
                } else {
                        bytenr += btrfs_file_extent_offset(leaf, fi);
                        em->block_start = bytenr;
                        em->block_len = em->len;
                        if (type == BTRFS_FILE_EXTENT_PREALLOC)
                                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                }
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                em->block_start = EXTENT_MAP_INLINE;
                em->start = extent_start;
                em->len = extent_end - extent_start;
                /*
                 * Initialize orig_start and block_len with the same values
                 * as in inode.c:btrfs_get_extent().
                 */
                em->orig_start = EXTENT_MAP_HOLE;
                em->block_len = (u64)-1;
                if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                }
        } else {
                btrfs_err(fs_info,
                          "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
                          type, btrfs_ino(inode), extent_start,
                          root->root_key.objectid);
        }
}