/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_CACHE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (r)->sectorsize)
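
/*
 * Insert a new BTRFS_EXTENT_DATA_KEY item for inode @objectid at file
 * position @pos in @root, describing a regular (BTRFS_FILE_EXTENT_REG)
 * extent that lives at @disk_offset on disk.
 */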
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
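
/*
 * Find the checksum item in the csum tree that covers @bytenr and return
 * a pointer to the checksum slot for @bytenr inside that item.
 * ERR_PTR(-EFBIG) means the item that should cover @bytenr ends exactly
 * at @bytenr; ERR_PTR(-ENOENT) means no item covers it at all.
 */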
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
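
/*
 * Search for the file extent item (BTRFS_EXTENT_DATA_KEY) of @objectid at
 * @offset.  A negative @mod prepares the path for deletion; any non-zero
 * @mod makes the search COW the path.
 */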
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
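
/*
 * Look up the checksums for every block in @bio, first in the in-memory
 * ordered sums and then in the csum tree.  The csums are written either to
 * @dst or, for read bios, into the btrfs_io_bio (inline, or a kmalloc'ed
 * buffer for larger bios).  Blocks with no csum get a zeroed slot.
 */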
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u32 diff;
	int nblocks;
	int bio_index = 0;
	int count;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
		path->reada = 2;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + bvec->bv_len - 1,
						EXTENT_NODATASUM, GFP_NOFS);
				} else {
					btrfs_info(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",
						   btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
		bio_index += count;
		while (count--) {
			disk_bytenr += bvec->bv_len;
			offset += bvec->bv_len;
			bvec++;
		}
	}
	btrfs_free_path(path);
	return 0;
}
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
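
/*
 * Collect all checksums for the byte range [start, end] from the csum tree
 * @root into btrfs_ordered_sum structures appended to @list.  When
 * @search_commit is set, the commit root is searched without taking tree
 * locks.
 */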
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
	       IS_ALIGNED(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = 2;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
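
/*
 * Compute the data checksums for @bio and attach them to the ordered
 * extent(s) that cover it.  With @contig set the bio is treated as
 * file-contiguous starting at @file_start; otherwise the file offset is
 * taken from each page in the bio.
 */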
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	int index;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered); /* Logic error */
	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (offset >= ordered->file_offset + ordered->len ||
		    offset < ordered->file_offset) {
			unsigned long bytes_left;
			sums->len = this_sum_bytes;
			this_sum_bytes = 0;
			btrfs_add_ordered_sum(inode, ordered, sums);
			btrfs_put_ordered_extent(ordered);

			bytes_left = bio->bi_iter.bi_size - total_bytes;

			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
				       GFP_NOFS);
			BUG_ON(!sums); /* -ENOMEM */
			sums->len = bytes_left;
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
				       total_bytes;
			index = 0;
		}

		data = kmap_atomic(bvec->bv_page);
		sums->sums[index] = ~(u32)0;
		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
						    sums->sums[index],
						    bvec->bv_len);
		kunmap_atomic(data);
		btrfs_csum_final(sums->sums[index],
				 (char *)(sums->sums + index));

		bio_index++;
		index++;
		total_bytes += bvec->bv_len;
		this_sum_bytes += bvec->bv_len;
		offset += bvec->bv_len;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}
/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(root->fs_info, path, key);
	} else {
		BUG();
	}
}
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
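
/*
 * Write the checksums in @sums into the csum tree, reusing or extending an
 * existing csum item when possible and inserting new items otherwise.
 * Loops until all of sums->len has been covered.
 */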
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				found_next = 1;
				goto insert;
			}
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			 ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
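
/*
 * Fill in the extent_map @em from the file extent item @fi that @path
 * currently points at, handling regular, preallocated and inline extents.
 */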
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}