fs/btrfs/file-item.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/sched/mm.h>
#include <crypto/hash.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))
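
/*
 * Number of data bytes whose checksums fit into a single btrfs_ordered_sum
 * whose checksum array is capped at roughly one page.
 */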
static inline u32 max_ordered_sum_bytes(struct btrfs_fs_info *fs_info,
					u16 csum_size)
{
	u32 ncsums = (PAGE_SIZE - sizeof(struct btrfs_ordered_sum)) / csum_size;

	return ncsums * fs_info->sectorsize;
}
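
/*
 * Insert a regular file extent item for @objectid at file offset @pos,
 * pointing at the on-disk extent described by @disk_offset and
 * @disk_num_bytes.
 */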
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
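
/*
 * Find the checksum item in the csum tree that covers @bytenr and return a
 * pointer to the matching checksum inside the leaf.  Returns ERR_PTR(-EFBIG)
 * if an item is found but ends exactly at @bytenr, or ERR_PTR(-ENOENT) if no
 * covering item exists.
 */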
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
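
/*
 * Position @path at the EXTENT_DATA item of @objectid at file @offset.
 * @mod < 0 prepares the search for deletion, any non-zero @mod forces COW
 * of the path.
 */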
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}
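
/*
 * Look up the expected checksum for every sector of @bio, first from any
 * in-memory ordered sums and then from the csum tree.  The results are
 * stored in @dst, or in the btrfs_io_bio when @dst is NULL; sectors with no
 * checksum on disk get a zeroed entry.
 */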
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u8 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum = kmalloc_array(nblocks, csum_size,
							GFP_NOFS);
			if (!btrfs_bio->csum) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count > 0) {
			count--;
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}
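
/* Convenience wrappers for buffered and direct I/O checksum lookups. */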
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u8 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
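
/*
 * Collect the checksums covering the range [start, end] from the csum tree
 * into btrfs_ordered_sum structures appended to @list.
 */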
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     max_ordered_sum_bytes(fs_info, csum_size));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
/*
 * btrfs_csum_one_bio - Calculates checksums of the data contained inside a bio
 * @inode:	 Owner of the data inside the bio
 * @bio:	 Contains the data to be checksummed
 * @file_start:  offset in file this bio begins to describe
 * @contig:	 Boolean. If true/1, all bio vecs in this bio are contiguous
 *		 and begin at @file_start in the file. False/0 means this bio
 *		 can contain potentially discontiguous bio vecs, so the
 *		 logical offset of each should be calculated separately.
 */
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
				u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	SHASH_DESC_ON_STACK(shash, fs_info->csum_shash);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;
	unsigned nofs_flag;
	const u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	nofs_flag = memalloc_nofs_save();
	sums = kvzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
			GFP_KERNEL);
	memalloc_nofs_restore(nofs_flag);

	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	shash->tfm = fs_info->csum_shash;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				nofs_flag = memalloc_nofs_save();
				sums = kvzalloc(btrfs_ordered_sum_size(fs_info,
						      bytes_left), GFP_KERNEL);
				memalloc_nofs_restore(nofs_flag);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;
			}

			crypto_shash_init(shash);
			data = kmap_atomic(bvec.bv_page);
			crypto_shash_update(shash, data + bvec.bv_offset
					    + (i * fs_info->sectorsize),
					    fs_info->sectorsize);
			kunmap_atomic(data);
			crypto_shash_final(shash, (char *)(sums->sums + index));
			index += csum_size;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, bytenr + len)
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_fs_info *fs_info = trans->fs_info;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	ASSERT(root == fs_info->csum_root ||
	       root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
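
/*
 * Write the checksums described by @sums into the csum tree, extending an
 * existing csum item when possible and inserting new items otherwise.
 * Loops until the whole ordered sum has been stored.
 */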
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	index += ins_size;
	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
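
/*
 * Fill an extent_map from the file extent item that @path points to,
 * handling regular, preallocated and inline extents.
 */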
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = inode->root->fs_info;
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}