fs/btrfs/file-item.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"
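/*
 * Sizing helpers for checksum items: __MAX_CSUM_ITEMS() is roughly the
 * number of checksums of the given size that fit in one leaf item while
 * still leaving room for another item header, MAX_CSUM_ITEMS() additionally
 * caps that count at PAGE_SIZE, and MAX_ORDERED_SUM_BYTES() is how many
 * bytes of data a page-sized btrfs_ordered_sum can carry checksums for.
 */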
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
                                   sizeof(struct btrfs_item) * 2) / \
                                   size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
                                       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
                                   sizeof(struct btrfs_ordered_sum)) / \
                                   sizeof(u32) * (fs_info)->sectorsize)
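/*
 * Insert a new EXTENT_DATA item describing a regular (non-inline) file
 * extent at file offset @pos of inode @objectid, pointing at @disk_num_bytes
 * of data starting at @disk_offset.
 */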
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 objectid, u64 pos,
                             u64 disk_offset, u64 disk_num_bytes,
                             u64 num_bytes, u64 offset, u64 ram_bytes,
                             u8 compression, u8 encryption, u16 other_encoding)
{
        int ret = 0;
        struct btrfs_file_extent_item *item;
        struct btrfs_key file_key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        file_key.objectid = objectid;
        file_key.offset = pos;
        file_key.type = BTRFS_EXTENT_DATA_KEY;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      sizeof(*item));
        if (ret < 0)
                goto out;
        BUG_ON(ret); /* Can't happen */
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0],
                              struct btrfs_file_extent_item);
        btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
        btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, item, offset);
        btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
        btrfs_set_file_extent_generation(leaf, item, trans->transid);
        btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
        btrfs_set_file_extent_compression(leaf, item, compression);
        btrfs_set_file_extent_encryption(leaf, item, encryption);
        btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}
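/*
 * Find the checksum item covering @bytenr and return a pointer to the
 * checksum slot for that block inside it.  Returns -EFBIG if the item that
 * would cover @bytenr ends exactly at it (so the caller may grow it), or
 * -ENOENT if no covering item exists.
 */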
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct btrfs_path *path,
                  u64 bytenr, int cow)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_csum_item *item;
        struct extent_buffer *leaf;
        u64 csum_offset = 0;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int csums_in_item;

        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
        if (ret < 0)
                goto fail;
        leaf = path->nodes[0];
        if (ret > 0) {
                ret = 1;
                if (path->slots[0] == 0)
                        goto fail;
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
                        goto fail;

                csum_offset = (bytenr - found_key.offset) >>
                              fs_info->sb->s_blocksize_bits;
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;

                if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
                } else if (csum_offset > csums_in_item) {
                        goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
        return item;
fail:
        if (ret > 0)
                ret = -ENOENT;
        return ERR_PTR(ret);
}
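/*
 * Position @path at the EXTENT_DATA item for (@objectid, @offset).
 * @mod < 0 searches for deletion (ins_len = -1), any non-zero @mod COWs the
 * path, and @mod == 0 is a plain read-only lookup.
 */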
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid,
                             u64 offset, int mod)
{
        int ret;
        struct btrfs_key file_key;
        int ins_len = mod < 0 ? -1 : 0;
        int cow = mod != 0;

        file_key.objectid = objectid;
        file_key.offset = offset;
        file_key.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
        return ret;
}
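/*
 * Look up the on-disk checksums for every sector of @bio.  The checksums are
 * copied into @dst when it is provided, otherwise into the btrfs_io_bio
 * (inline buffer or a kmalloc'ed array).  Sums still pending in an ordered
 * extent are taken from there first; blocks that have no checksum at all are
 * zero-filled and, for the data reloc tree, marked EXTENT_NODATASUM.
 */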
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                   u64 logical_offset, u32 *dst, int dio)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio_vec bvec;
        struct bvec_iter iter;
        struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
        struct btrfs_csum_item *item = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        u8 *csum;
        u64 offset = 0;
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
        u64 page_bytes_left;
        u32 diff;
        int nblocks;
        int count = 0;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return BLK_STS_RESOURCE;

        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
                                                        GFP_NOFS);
                        if (!btrfs_bio->csum) {
                                btrfs_free_path(path);
                                return BLK_STS_RESOURCE;
                        }
                } else {
                        btrfs_bio->csum = btrfs_bio->csum_inline;
                }
                csum = btrfs_bio->csum;
        } else {
                csum = (u8 *)dst;
        }

        if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
                path->reada = READA_FORWARD;

        /*
         * the free space stuff is only read when it hasn't been
         * updated in the current transaction.  So, we can safely
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
        if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;

        bio_for_each_segment(bvec, bio, iter) {
                page_bytes_left = bvec.bv_len;
                if (count)
                        goto next;

                if (!dio)
                        offset = page_offset(bvec.bv_page) + bvec.bv_offset;
                count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
                                               (u32 *)csum, nblocks);
                if (count)
                        goto found;

                if (!item || disk_bytenr < item_start_offset ||
                    disk_bytenr >= item_last_offset) {
                        struct btrfs_key found_key;
                        u32 item_size;

                        if (item)
                                btrfs_release_path(path);
                        item = btrfs_lookup_csum(NULL, fs_info->csum_root,
                                                 path, disk_bytenr, 0);
                        if (IS_ERR(item)) {
                                count = 1;
                                memset(csum, 0, csum_size);
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
                                                offset + fs_info->sectorsize - 1,
                                                EXTENT_NODATASUM);
                                } else {
                                        btrfs_info_rl(fs_info,
                                                "no csum found for inode %llu start %llu",
                                                btrfs_ino(BTRFS_I(inode)), offset);
                                }
                                item = NULL;
                                btrfs_release_path(path);
                                goto found;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);

                        item_start_offset = found_key.offset;
                        item_size = btrfs_item_size_nr(path->nodes[0],
                                                       path->slots[0]);
                        item_last_offset = item_start_offset +
                                (item_size / csum_size) *
                                fs_info->sectorsize;
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_csum_item);
                }
                /*
                 * this byte range must be able to fit inside
                 * a single leaf so it will also fit inside a u32
                 */
                diff = disk_bytenr - item_start_offset;
                diff = diff / fs_info->sectorsize;
                diff = diff * csum_size;
                count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
                                            inode->i_sb->s_blocksize_bits);
                read_extent_buffer(path->nodes[0], csum,
                                   ((unsigned long)item) + diff,
                                   csum_size * count);
found:
                csum += count * csum_size;
                nblocks -= count;
next:
                while (count--) {
                        disk_bytenr += fs_info->sectorsize;
                        offset += fs_info->sectorsize;
                        page_bytes_left -= fs_info->sectorsize;
                        if (!page_bytes_left)
                                break; /* move to next bio */
                }
        }

        WARN_ON_ONCE(count);
        btrfs_free_path(path);
        return 0;
}
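/*
 * Wrappers around __btrfs_lookup_bio_sums(): the plain variant copies the
 * checksums into @dst, while the _dio variant stores them in the
 * btrfs_io_bio and takes @offset as the file offset the bio starts at.
 */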
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
        return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
        return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
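/*
 * Collect all checksums for the byte range [@start, @end] from the csum tree
 * into a list of btrfs_ordered_sum records, each covering at most
 * MAX_ORDERED_SUM_BYTES of data.  @start and @end + 1 must be sector aligned.
 */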
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_ordered_sum *sums;
        struct btrfs_csum_item *item;
        LIST_HEAD(tmplist);
        unsigned long offset;
        int ret;
        size_t size;
        u64 csum_end;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(end + 1, fs_info->sectorsize));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (search_commit) {
                path->skip_locking = 1;
                path->reada = READA_FORWARD;
                path->search_commit_root = 1;
        }

        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key.offset = start;
        key.type = BTRFS_EXTENT_CSUM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto fail;
        if (ret > 0 && path->slots[0] > 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
                    key.type == BTRFS_EXTENT_CSUM_KEY) {
                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        if (offset * csum_size <
                            btrfs_item_size_nr(leaf, path->slots[0] - 1))
                                path->slots[0]--;
                }
        }

        while (start <= end) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto fail;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY ||
                    key.offset > end)
                        break;

                if (key.offset > start)
                        start = key.offset;

                size = btrfs_item_size_nr(leaf, path->slots[0]);
                csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
                if (csum_end <= start) {
                        path->slots[0]++;
                        continue;
                }

                csum_end = min(csum_end, end + 1);
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_csum_item);
                while (start < csum_end) {
                        size = min_t(size_t, csum_end - start,
                                     MAX_ORDERED_SUM_BYTES(fs_info));
                        sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
                                       GFP_NOFS);
                        if (!sums) {
                                ret = -ENOMEM;
                                goto fail;
                        }

                        sums->bytenr = start;
                        sums->len = (int)size;

                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        offset *= csum_size;
                        size >>= fs_info->sb->s_blocksize_bits;

                        read_extent_buffer(path->nodes[0],
                                           sums->sums,
                                           ((unsigned long)item) + offset,
                                           csum_size * size);

                        start += fs_info->sectorsize * size;
                        list_add_tail(&sums->list, &tmplist);
                }
                path->slots[0]++;
        }
        ret = 0;
fail:
        while (ret < 0 && !list_empty(&tmplist)) {
                sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        list_splice_tail(&tmplist, list);

        btrfs_free_path(path);
        return ret;
}
/*
 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
 * @inode:       Owner of the data inside the bio
 * @bio:         Contains the data to be checksummed
 * @file_start:  offset in file this bio begins to describe
 * @contig:      Boolean. If true/1 means all bio vecs in this bio are
 *               contiguous and they begin at @file_start in the file. False/0
 *               means this bio can contain potentially discontiguous bio vecs,
 *               so the logical offset of each should be calculated separately.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_sum *sums;
        struct btrfs_ordered_extent *ordered = NULL;
        char *data;
        struct bvec_iter iter;
        struct bio_vec bvec;
        int index;
        int nr_sectors;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        int i;
        u64 offset;
        unsigned nofs_flag;

        nofs_flag = memalloc_nofs_save();
        sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
                        GFP_KERNEL);
        memalloc_nofs_restore(nofs_flag);

        if (!sums)
                return BLK_STS_RESOURCE;

        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);

        if (contig)
                offset = file_start;
        else
                offset = 0; /* shut up gcc */

        sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;

        bio_for_each_segment(bvec, bio, iter) {
                if (!contig)
                        offset = page_offset(bvec.bv_page) + bvec.bv_offset;

                if (!ordered) {
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
                }

                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
                                                 bvec.bv_len + fs_info->sectorsize
                                                 - 1);

                for (i = 0; i < nr_sectors; i++) {
                        if (offset >= ordered->file_offset + ordered->len ||
                            offset < ordered->file_offset) {
                                unsigned long bytes_left;

                                sums->len = this_sum_bytes;
                                this_sum_bytes = 0;
                                btrfs_add_ordered_sum(ordered, sums);
                                btrfs_put_ordered_extent(ordered);

                                bytes_left = bio->bi_iter.bi_size - total_bytes;

                                nofs_flag = memalloc_nofs_save();
                                sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
                                                bytes_left), GFP_KERNEL);
                                memalloc_nofs_restore(nofs_flag);
                                BUG_ON(!sums); /* -ENOMEM */
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
                                                                      offset);
                                ASSERT(ordered); /* Logic error */
                                sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
                                        + total_bytes;
                                index = 0;
                        }

                        sums->sums[index] = ~(u32)0;
                        data = kmap_atomic(bvec.bv_page);
                        sums->sums[index]
                                = btrfs_csum_data(data + bvec.bv_offset
                                                  + (i * fs_info->sectorsize),
                                                  sums->sums[index],
                                                  fs_info->sectorsize);
                        kunmap_atomic(data);
                        btrfs_csum_final(sums->sums[index],
                                         (char *)(sums->sums + index));
                        index++;
                        offset += fs_info->sectorsize;
                        this_sum_bytes += fs_info->sectorsize;
                        total_bytes += fs_info->sectorsize;
                }
        }
        this_sum_bytes = 0;
        btrfs_add_ordered_sum(ordered, sums);
        btrfs_put_ordered_extent(ordered);
        return 0;
}
/*
 * Helper function for csum removal: this expects the key to describe the
 * csum pointed to by the path, and it expects the csum to overlap the
 * range [bytenr, len].
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
                                       struct btrfs_path *path,
                                       struct btrfs_key *key,
                                       u64 bytenr, u64 len)
{
        struct extent_buffer *leaf;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u64 csum_end;
        u64 end_byte = bytenr + len;
        u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

        leaf = path->nodes[0];
        csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
        csum_end <<= fs_info->sb->s_blocksize_bits;
        csum_end += key->offset;

        if (key->offset < bytenr && csum_end <= end_byte) {
                /*
                 *         [ bytenr - len ]
                 *         [   ]
                 *   [csum     ]
                 *   A simple truncate off the end of the item
                 */
                u32 new_size = (bytenr - key->offset) >> blocksize_bits;
                new_size *= csum_size;
                btrfs_truncate_item(path, new_size, 1);
        } else if (key->offset >= bytenr && csum_end > end_byte &&
                   end_byte > key->offset) {
                /*
                 *         [ bytenr - len ]
                 *                 [ ]
                 *                 [csum     ]
                 * we need to truncate from the beginning of the csum
                 */
                u32 new_size = (csum_end - end_byte) >> blocksize_bits;
                new_size *= csum_size;

                btrfs_truncate_item(path, new_size, 0);

                key->offset = end_byte;
                btrfs_set_item_key_safe(fs_info, path, key);
        } else {
                BUG();
        }
}
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
        struct btrfs_root *root = fs_info->csum_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int blocksize_bits = fs_info->sb->s_blocksize_bits;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
                key.offset = end_byte - 1;
                key.type = BTRFS_EXTENT_CSUM_KEY;

                path->leave_spinning = 1;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                } else if (ret < 0) {
                        break;
                }

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY) {
                        break;
                }

                if (key.offset >= end_byte)
                        break;

                csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
                csum_end <<= blocksize_bits;
                csum_end += key.offset;

                /* this csum ends before we start, we're done */
                if (csum_end <= bytenr)
                        break;

                /* delete the entire item, it is inside our range */
                if (key.offset >= bytenr && csum_end <= end_byte) {
                        int del_nr = 1;

                        /*
                         * Check how many csum items preceding this one in this
                         * leaf correspond to our range and then delete them all
                         * at once.
                         */
                        if (key.offset > bytenr && path->slots[0] > 0) {
                                int slot = path->slots[0] - 1;

                                while (slot >= 0) {
                                        struct btrfs_key pk;

                                        btrfs_item_key_to_cpu(leaf, &pk, slot);
                                        if (pk.offset < bytenr ||
                                            pk.type != BTRFS_EXTENT_CSUM_KEY ||
                                            pk.objectid !=
                                            BTRFS_EXTENT_CSUM_OBJECTID)
                                                break;
                                        path->slots[0] = slot;
                                        del_nr++;
                                        key.offset = pk.offset;
                                        slot--;
                                }
                        }
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
                                goto out;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
                        unsigned long offset;
                        unsigned long shift_len;
                        unsigned long item_offset;
                        /*
                         *        [ bytenr - len ]
                         *     [csum              ]
                         *
                         * Our bytes are in the middle of the csum,
                         * we need to split this item and insert a new one.
                         *
                         * But we can't drop the path because the
                         * csum could change, get removed, extended etc.
                         *
                         * The trick here is the max size of a csum item leaves
                         * enough room in the tree block for a single
                         * item header.  So, we split the item in place,
                         * adding a new header pointing to the existing
                         * bytes.  Then we loop around again and we have
                         * a nicely formed csum item that we can neatly
                         * truncate.
                         */
                        offset = (bytenr - key.offset) >> blocksize_bits;
                        offset *= csum_size;

                        shift_len = (len >> blocksize_bits) * csum_size;

                        item_offset = btrfs_item_ptr_offset(leaf,
                                                            path->slots[0]);

                        memzero_extent_buffer(leaf, item_offset + offset,
                                              shift_len);
                        key.offset = bytenr;

                        /*
                         * btrfs_split_item returns -EAGAIN when the
                         * item changed size or key
                         */
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }

                        key.offset = end_byte - 1;
                } else {
                        truncate_one_csum(fs_info, path, &key, bytenr, len);
                        if (key.offset < bytenr)
                                break;
                }
                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
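/*
 * Write the checksums described by @sums into the csum tree.  An existing
 * csum item is reused or grown whenever the new checksums continue it;
 * otherwise a new item is inserted, and the "again" loop repeats until every
 * byte of @sums has been covered.
 */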
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        struct btrfs_csum_item *item;
        struct btrfs_csum_item *item_end;
        struct extent_buffer *leaf = NULL;
        u64 next_offset;
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
        u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        next_offset = (u64)-1;
        found_next = 0;
        bytenr = sums->bytenr + total_bytes;
        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;

        item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
        if (!IS_ERR(item)) {
                ret = 0;
                leaf = path->nodes[0];
                item_end = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_csum_item);
                item_end = (struct btrfs_csum_item *)((char *)item_end +
                           btrfs_item_size_nr(leaf, path->slots[0]));
                goto found;
        }
        ret = PTR_ERR(item);
        if (ret != -EFBIG && ret != -ENOENT)
                goto fail_unlock;

        if (ret == -EFBIG) {
                u32 item_size;
                /* we found one, but it isn't big enough yet */
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if ((item_size / csum_size) >=
                    MAX_CSUM_ITEMS(fs_info, csum_size)) {
                        /* already at max size, make a new one */
                        goto insert;
                }
        } else {
                int slot = path->slots[0] + 1;
                /* we didn't find a csum item, insert one */
                nritems = btrfs_header_nritems(path->nodes[0]);
                if (!nritems || (path->slots[0] >= nritems - 1)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 1)
                                found_next = 1;
                        if (ret != 0)
                                goto insert;
                        slot = path->slots[0];
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
                if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
                        found_next = 1;
                        goto insert;
                }
                next_offset = found_key.offset;
                found_next = 1;
                goto insert;
        }

        /*
         * at this point, we know the tree has an item, but it isn't big
         * enough yet to put our csum in.  Grow it
         */
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
        if (ret < 0)
                goto fail_unlock;

        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto insert;
                path->slots[0]--;
        }

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        csum_offset = (bytenr - found_key.offset) >>
                      fs_info->sb->s_blocksize_bits;

        if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
            found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
            csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
                goto insert;
        }

        if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
            csum_size) {
                int extend_nr;
                u64 tmp;
                u32 diff;
                u32 free_space;

                if (btrfs_leaf_free_space(leaf) <
                    sizeof(struct btrfs_item) + csum_size * 2)
                        goto insert;

                free_space = btrfs_leaf_free_space(leaf) -
                             sizeof(struct btrfs_item) - csum_size;
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                WARN_ON(tmp < 1);

                extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

                diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
                diff = min(free_space, diff);
                diff /= csum_size;
                diff *= csum_size;

                btrfs_extend_item(path, diff);
                ret = 0;
                goto csum;
        }

insert:
        btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
                u64 tmp;

                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                tmp = min(tmp, (next_offset - file_key.offset) >>
                               fs_info->sb->s_blocksize_bits);

                tmp = max_t(u64, 1, tmp);
                tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
                ins_size = csum_size * tmp;
        } else {
                ins_size = csum_size;
        }
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      ins_size);
        path->leave_spinning = 0;
        if (ret < 0)
                goto fail_unlock;
        if (WARN_ON(ret != 0))
                goto fail_unlock;
        leaf = path->nodes[0];
csum:
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item_end = (struct btrfs_csum_item *)((unsigned char *)item +
                   btrfs_item_size_nr(leaf, path->slots[0]));
        item = (struct btrfs_csum_item *)((unsigned char *)item +
               csum_offset * csum_size);
found:
        ins_size = (u32)(sums->len - total_bytes) >>
                   fs_info->sb->s_blocksize_bits;
        ins_size *= csum_size;
        ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
                         ins_size);
        write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
                            ins_size);

        ins_size /= csum_size;
        total_bytes += ins_size * fs_info->sectorsize;
        index += ins_size;

        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
                btrfs_release_path(path);
                cond_resched();
                goto again;
        }
out:
        btrfs_free_path(path);
        return ret;

fail_unlock:
        goto out;
}
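/*
 * Translate an on-disk btrfs_file_extent_item into the in-memory extent_map
 * @em, handling regular, preallocated, inline, compressed and hole extents.
 */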
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
                                     const struct btrfs_path *path,
                                     struct btrfs_file_extent_item *fi,
                                     const bool new_inline,
                                     struct extent_map *em)
{
        struct btrfs_fs_info *fs_info = inode->root->fs_info;
        struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf = path->nodes[0];
        const int slot = path->slots[0];
        struct btrfs_key key;
        u64 extent_start, extent_end;
        u64 bytenr;
        u8 type = btrfs_file_extent_type(leaf, fi);
        int compress_type = btrfs_file_extent_compression(leaf, fi);

        em->bdev = fs_info->fs_devices->latest_bdev;
        btrfs_item_key_to_cpu(leaf, &key, slot);
        extent_start = key.offset;

        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                extent_end = extent_start +
                        btrfs_file_extent_num_bytes(leaf, fi);
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_ram_bytes(leaf, fi);
                extent_end = ALIGN(extent_start + size,
                                   fs_info->sectorsize);
        }

        em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                em->start = extent_start;
                em->len = extent_end - extent_start;
                em->orig_start = extent_start -
                        btrfs_file_extent_offset(leaf, fi);
                em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                if (bytenr == 0) {
                        em->block_start = EXTENT_MAP_HOLE;
                        return;
                }
                if (compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                        em->block_start = bytenr;
                        em->block_len = em->orig_block_len;
                } else {
                        bytenr += btrfs_file_extent_offset(leaf, fi);
                        em->block_start = bytenr;
                        em->block_len = em->len;
                        if (type == BTRFS_FILE_EXTENT_PREALLOC)
                                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                }
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                em->block_start = EXTENT_MAP_INLINE;
                em->start = extent_start;
                em->len = extent_end - extent_start;
                /*
                 * Initialize orig_start and block_len with the same values
                 * as in inode.c:btrfs_get_extent().
                 */
                em->orig_start = EXTENT_MAP_HOLE;
                em->block_len = (u64)-1;
                if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                }
        } else {
                btrfs_err(fs_info,
                          "unknown file extent item type %d, inode %llu, offset %llu, "
                          "root %llu", type, btrfs_ino(inode), extent_start,
                          root->root_key.objectid);
        }
}