fs/btrfs/file-item.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
                                   sizeof(struct btrfs_item) * 2) / \
                                   size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
                                       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
                                   sizeof(struct btrfs_ordered_sum)) / \
                                   sizeof(u32) * (fs_info)->sectorsize)
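
/*
 * Insert a BTRFS_EXTENT_DATA_KEY item describing a regular file extent
 * at file offset @pos of inode @objectid.
 */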
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 objectid, u64 pos,
                             u64 disk_offset, u64 disk_num_bytes,
                             u64 num_bytes, u64 offset, u64 ram_bytes,
                             u8 compression, u8 encryption, u16 other_encoding)
{
        int ret = 0;
        struct btrfs_file_extent_item *item;
        struct btrfs_key file_key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        file_key.objectid = objectid;
        file_key.offset = pos;
        file_key.type = BTRFS_EXTENT_DATA_KEY;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      sizeof(*item));
        if (ret < 0)
                goto out;
        BUG_ON(ret); /* Can't happen */
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0],
                              struct btrfs_file_extent_item);
        btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
        btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, item, offset);
        btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
        btrfs_set_file_extent_generation(leaf, item, trans->transid);
        btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
        btrfs_set_file_extent_compression(leaf, item, compression);
        btrfs_set_file_extent_encryption(leaf, item, encryption);
        btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}
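
/*
 * Find the checksum item covering @bytenr and return a pointer to the
 * checksum for that block inside the item.  Returns ERR_PTR(-EFBIG) when
 * @bytenr sits just past the last checksum of the preceding item and
 * ERR_PTR(-ENOENT) when no covering item exists.
 */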
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct btrfs_path *path,
                  u64 bytenr, int cow)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        int ret;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_csum_item *item;
        struct extent_buffer *leaf;
        u64 csum_offset = 0;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int csums_in_item;

        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
        if (ret < 0)
                goto fail;
        leaf = path->nodes[0];
        if (ret > 0) {
                ret = 1;
                if (path->slots[0] == 0)
                        goto fail;
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
                        goto fail;

                csum_offset = (bytenr - found_key.offset) >>
                                fs_info->sb->s_blocksize_bits;
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;

                if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
                } else if (csum_offset > csums_in_item) {
                        goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
        return item;
fail:
        if (ret > 0)
                ret = -ENOENT;
        return ERR_PTR(ret);
}
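
/*
 * Position @path at the BTRFS_EXTENT_DATA_KEY item for (@objectid, @offset).
 * A negative @mod searches with room reserved for deletion; any non-zero
 * @mod also enables COW of the path.
 */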
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid,
                             u64 offset, int mod)
{
        int ret;
        struct btrfs_key file_key;
        int ins_len = mod < 0 ? -1 : 0;
        int cow = mod != 0;

        file_key.objectid = objectid;
        file_key.offset = offset;
        file_key.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
        return ret;
}
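
/* Free the checksum buffer allocated in __btrfs_lookup_bio_sums(). */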
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
        kfree(bio->csum_allocated);
}
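
/*
 * Look up the checksum for every sector covered by @bio, first in the
 * in-memory ordered sums and then in the csum tree, and copy the results
 * into @dst (or into the btrfs_io_bio when @dst is NULL).  For direct IO
 * the file offset comes from @logical_offset, otherwise from the pages.
 */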
static int __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
                                   u64 logical_offset, u32 *dst, int dio)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct bio_vec *bvec;
        struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
        struct btrfs_csum_item *item = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        u8 *csum;
        u64 offset = 0;
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
        u64 page_bytes_left;
        u32 diff;
        int nblocks;
        int count = 0, i;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc_array(nblocks,
                                        csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
                                return -ENOMEM;
                        }
                        btrfs_bio->csum = btrfs_bio->csum_allocated;
                        btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
                } else {
                        btrfs_bio->csum = btrfs_bio->csum_inline;
                }
                csum = btrfs_bio->csum;
        } else {
                csum = (u8 *)dst;
        }

        if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
                path->reada = READA_FORWARD;

        WARN_ON(bio->bi_vcnt <= 0);

        /*
         * the free space stuff is only read when it hasn't been
         * updated in the current transaction.  So, we can safely
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
        if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;

        bio_for_each_segment_all(bvec, bio, i) {
                page_bytes_left = bvec->bv_len;
                if (count)
                        goto next;

                if (!dio)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
                count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
                                               (u32 *)csum, nblocks);
                if (count)
                        goto found;

                if (!item || disk_bytenr < item_start_offset ||
                    disk_bytenr >= item_last_offset) {
                        struct btrfs_key found_key;
                        u32 item_size;

                        if (item)
                                btrfs_release_path(path);
                        item = btrfs_lookup_csum(NULL, fs_info->csum_root,
                                                 path, disk_bytenr, 0);
                        if (IS_ERR(item)) {
                                count = 1;
                                memset(csum, 0, csum_size);
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
                                                offset + fs_info->sectorsize - 1,
                                                EXTENT_NODATASUM);
                                } else {
                                        btrfs_info_rl(fs_info,
                                                "no csum found for inode %llu start %llu",
                                                btrfs_ino(BTRFS_I(inode)), offset);
                                }
                                item = NULL;
                                btrfs_release_path(path);
                                goto found;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);

                        item_start_offset = found_key.offset;
                        item_size = btrfs_item_size_nr(path->nodes[0],
                                                       path->slots[0]);
                        item_last_offset = item_start_offset +
                                (item_size / csum_size) *
                                fs_info->sectorsize;
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_csum_item);
                }
                /*
                 * this byte range must be able to fit inside
                 * a single leaf so it will also fit inside a u32
                 */
                diff = disk_bytenr - item_start_offset;
                diff = diff / fs_info->sectorsize;
                diff = diff * csum_size;
                count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
                                            inode->i_sb->s_blocksize_bits);
                read_extent_buffer(path->nodes[0], csum,
                                   ((unsigned long)item) + diff,
                                   csum_size * count);
found:
                csum += count * csum_size;
                nblocks -= count;
next:
                while (count--) {
                        disk_bytenr += fs_info->sectorsize;
                        offset += fs_info->sectorsize;
                        page_bytes_left -= fs_info->sectorsize;
                        if (!page_bytes_left)
                                break; /* move to next bio */
                }
        }

        WARN_ON_ONCE(count);
        btrfs_free_path(path);
        return 0;
}
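
/*
 * Wrappers around __btrfs_lookup_bio_sums() for buffered and direct IO
 * reads respectively.
 */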
int btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
        return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
        return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}
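
/*
 * Collect every checksum stored for the byte range [@start, @end] into
 * btrfs_ordered_sum structures appended to @list.  @start and @end + 1
 * must be sector aligned.
 */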
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_ordered_sum *sums;
        struct btrfs_csum_item *item;
        LIST_HEAD(tmplist);
        unsigned long offset;
        int ret;
        size_t size;
        u64 csum_end;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(end + 1, fs_info->sectorsize));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (search_commit) {
                path->skip_locking = 1;
                path->reada = READA_FORWARD;
                path->search_commit_root = 1;
        }

        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key.offset = start;
        key.type = BTRFS_EXTENT_CSUM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto fail;
        if (ret > 0 && path->slots[0] > 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
                    key.type == BTRFS_EXTENT_CSUM_KEY) {
                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        if (offset * csum_size <
                            btrfs_item_size_nr(leaf, path->slots[0] - 1))
                                path->slots[0]--;
                }
        }

        while (start <= end) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto fail;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY ||
                    key.offset > end)
                        break;

                if (key.offset > start)
                        start = key.offset;

                size = btrfs_item_size_nr(leaf, path->slots[0]);
                csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
                if (csum_end <= start) {
                        path->slots[0]++;
                        continue;
                }

                csum_end = min(csum_end, end + 1);
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_csum_item);
                while (start < csum_end) {
                        size = min_t(size_t, csum_end - start,
                                     MAX_ORDERED_SUM_BYTES(fs_info));
                        sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
                                       GFP_NOFS);
                        if (!sums) {
                                ret = -ENOMEM;
                                goto fail;
                        }

                        sums->bytenr = start;
                        sums->len = (int)size;

                        offset = (start - key.offset) >>
                                 fs_info->sb->s_blocksize_bits;
                        offset *= csum_size;
                        size >>= fs_info->sb->s_blocksize_bits;

                        read_extent_buffer(path->nodes[0],
                                           sums->sums,
                                           ((unsigned long)item) + offset,
                                           csum_size * size);

                        start += fs_info->sectorsize * size;
                        list_add_tail(&sums->list, &tmplist);
                }
                path->slots[0]++;
        }
        ret = 0;
fail:
        while (ret < 0 && !list_empty(&tmplist)) {
                sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        list_splice_tail(&tmplist, list);

        btrfs_free_path(path);
        return ret;
}
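
/*
 * Compute the data checksum for every sector of @bio and attach the
 * resulting btrfs_ordered_sum(s) to the ordered extent(s) covering the
 * range.  @file_start is the file offset of the bio when @contig is set.
 */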
int btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
                       u64 file_start, int contig)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
        struct btrfs_ordered_sum *sums;
        struct btrfs_ordered_extent *ordered = NULL;
        char *data;
        struct bio_vec *bvec;
        int index;
        int nr_sectors;
        int i, j;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        u64 offset;

        WARN_ON(bio->bi_vcnt <= 0);
        sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
                       GFP_NOFS);
        if (!sums)
                return -ENOMEM;

        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);

        if (contig)
                offset = file_start;
        else
                offset = 0; /* shut up gcc */

        sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;

        bio_for_each_segment_all(bvec, bio, j) {
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;

                if (!ordered) {
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
                }

                data = kmap_atomic(bvec->bv_page);

                nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
                                                 bvec->bv_len + fs_info->sectorsize
                                                 - 1);

                for (i = 0; i < nr_sectors; i++) {
                        if (offset >= ordered->file_offset + ordered->len ||
                            offset < ordered->file_offset) {
                                unsigned long bytes_left;

                                kunmap_atomic(data);
                                sums->len = this_sum_bytes;
                                this_sum_bytes = 0;
                                btrfs_add_ordered_sum(inode, ordered, sums);
                                btrfs_put_ordered_extent(ordered);

                                bytes_left = bio->bi_iter.bi_size - total_bytes;

                                sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
                                               GFP_NOFS);
                                BUG_ON(!sums); /* -ENOMEM */
                                sums->len = bytes_left;
                                ordered = btrfs_lookup_ordered_extent(inode,
                                                                      offset);
                                ASSERT(ordered); /* Logic error */
                                sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
                                        + total_bytes;
                                index = 0;

                                data = kmap_atomic(bvec->bv_page);
                        }

                        sums->sums[index] = ~(u32)0;
                        sums->sums[index]
                                = btrfs_csum_data(data + bvec->bv_offset
                                                + (i * fs_info->sectorsize),
                                                sums->sums[index],
                                                fs_info->sectorsize);
                        btrfs_csum_final(sums->sums[index],
                                        (char *)(sums->sums + index));
                        index++;
                        offset += fs_info->sectorsize;
                        this_sum_bytes += fs_info->sectorsize;
                        total_bytes += fs_info->sectorsize;
                }

                kunmap_atomic(data);
        }
        this_sum_bytes = 0;
        btrfs_add_ordered_sum(inode, ordered, sums);
        btrfs_put_ordered_extent(ordered);
        return 0;
}
/*
 * Helper function for csum removal.  This expects the key to describe the
 * csum pointed to by the path, and it expects the csum to overlap the
 * range [bytenr, bytenr + len).
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
                                       struct btrfs_path *path,
                                       struct btrfs_key *key,
                                       u64 bytenr, u64 len)
{
        struct extent_buffer *leaf;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        u64 csum_end;
        u64 end_byte = bytenr + len;
        u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

        leaf = path->nodes[0];
        csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
        csum_end <<= fs_info->sb->s_blocksize_bits;
        csum_end += key->offset;

        if (key->offset < bytenr && csum_end <= end_byte) {
                /*
                 *         [ bytenr - len ]
                 *         [   ]
                 *   [csum     ]
                 *   A simple truncate off the end of the item
                 */
                u32 new_size = (bytenr - key->offset) >> blocksize_bits;
                new_size *= csum_size;
                btrfs_truncate_item(fs_info, path, new_size, 1);
        } else if (key->offset >= bytenr && csum_end > end_byte &&
                   end_byte > key->offset) {
                /*
                 *         [ bytenr - len ]
                 *                 [ ]
                 *                 [csum     ]
                 * we need to truncate from the beginning of the csum
                 */
                u32 new_size = (csum_end - end_byte) >> blocksize_bits;
                new_size *= csum_size;

                btrfs_truncate_item(fs_info, path, new_size, 0);

                key->offset = end_byte;
                btrfs_set_item_key_safe(fs_info, path, key);
        } else {
                BUG();
        }
}
/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
        struct btrfs_root *root = fs_info->csum_root;
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
        int blocksize_bits = fs_info->sb->s_blocksize_bits;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
                key.offset = end_byte - 1;
                key.type = BTRFS_EXTENT_CSUM_KEY;

                path->leave_spinning = 1;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                } else if (ret < 0) {
                        break;
                }

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY) {
                        break;
                }

                if (key.offset >= end_byte)
                        break;

                csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
                csum_end <<= blocksize_bits;
                csum_end += key.offset;

                /* this csum ends before we start, we're done */
                if (csum_end <= bytenr)
                        break;

                /* delete the entire item, it is inside our range */
                if (key.offset >= bytenr && csum_end <= end_byte) {
                        int del_nr = 1;

                        /*
                         * Check how many csum items preceding this one in this
                         * leaf correspond to our range and then delete them all
                         * at once.
                         */
                        if (key.offset > bytenr && path->slots[0] > 0) {
                                int slot = path->slots[0] - 1;

                                while (slot >= 0) {
                                        struct btrfs_key pk;

                                        btrfs_item_key_to_cpu(leaf, &pk, slot);
                                        if (pk.offset < bytenr ||
                                            pk.type != BTRFS_EXTENT_CSUM_KEY ||
                                            pk.objectid !=
                                            BTRFS_EXTENT_CSUM_OBJECTID)
                                                break;
                                        path->slots[0] = slot;
                                        del_nr++;
                                        key.offset = pk.offset;
                                        slot--;
                                }
                        }
                        ret = btrfs_del_items(trans, root, path,
                                              path->slots[0], del_nr);
                        if (ret)
                                goto out;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
                        unsigned long offset;
                        unsigned long shift_len;
                        unsigned long item_offset;
                        /*
                         *        [ bytenr - len ]
                         *     [csum                ]
                         *
                         * Our bytes are in the middle of the csum,
                         * we need to split this item and insert a new one.
                         *
                         * But we can't drop the path because the
                         * csum could change, get removed, extended etc.
                         *
                         * The trick here is the max size of a csum item leaves
                         * enough room in the tree block for a single
                         * item header.  So, we split the item in place,
                         * adding a new header pointing to the existing
                         * bytes.  Then we loop around again and we have
                         * a nicely formed csum item that we can neatly
                         * truncate.
                         */
                        offset = (bytenr - key.offset) >> blocksize_bits;
                        offset *= csum_size;

                        shift_len = (len >> blocksize_bits) * csum_size;

                        item_offset = btrfs_item_ptr_offset(leaf,
                                                            path->slots[0]);

                        memzero_extent_buffer(leaf, item_offset + offset,
                                              shift_len);
                        key.offset = bytenr;

                        /*
                         * btrfs_split_item returns -EAGAIN when the
                         * item changed size or key
                         */
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, ret);
                                goto out;
                        }

                        key.offset = end_byte - 1;
                } else {
                        truncate_one_csum(fs_info, path, &key, bytenr, len);
                        if (key.offset < bytenr)
                                break;
                }
                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}
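
/*
 * Insert the checksums in @sums into the csum tree, extending an existing
 * checksum item where possible and creating new items otherwise.
 */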
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        struct btrfs_csum_item *item;
        struct btrfs_csum_item *item_end;
        struct extent_buffer *leaf = NULL;
        u64 next_offset;
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
        u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
        int ret;
        u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        next_offset = (u64)-1;
        found_next = 0;
        bytenr = sums->bytenr + total_bytes;
        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;

        item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
        if (!IS_ERR(item)) {
                ret = 0;
                leaf = path->nodes[0];
                item_end = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_csum_item);
                item_end = (struct btrfs_csum_item *)((char *)item_end +
                           btrfs_item_size_nr(leaf, path->slots[0]));
                goto found;
        }
        ret = PTR_ERR(item);
        if (ret != -EFBIG && ret != -ENOENT)
                goto fail_unlock;

        if (ret == -EFBIG) {
                u32 item_size;
                /* we found one, but it isn't big enough yet */
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if ((item_size / csum_size) >=
                    MAX_CSUM_ITEMS(fs_info, csum_size)) {
                        /* already at max size, make a new one */
                        goto insert;
                }
        } else {
                int slot = path->slots[0] + 1;
                /* we didn't find a csum item, insert one */
                nritems = btrfs_header_nritems(path->nodes[0]);
                if (!nritems || (path->slots[0] >= nritems - 1)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 1)
                                found_next = 1;
                        if (ret != 0)
                                goto insert;
                        slot = path->slots[0];
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
                if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
                        found_next = 1;
                        goto insert;
                }
                next_offset = found_key.offset;
                found_next = 1;
                goto insert;
        }

        /*
         * at this point, we know the tree has an item, but it isn't big
         * enough yet to put our csum in.  Grow it
         */
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
        if (ret < 0)
                goto fail_unlock;

        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto insert;
                path->slots[0]--;
        }

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        csum_offset = (bytenr - found_key.offset) >>
                        fs_info->sb->s_blocksize_bits;

        if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
            found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
            csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
                goto insert;
        }

        if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
            csum_size) {
                int extend_nr;
                u64 tmp;
                u32 diff;
                u32 free_space;

                if (btrfs_leaf_free_space(fs_info, leaf) <
                    sizeof(struct btrfs_item) + csum_size * 2)
                        goto insert;

                free_space = btrfs_leaf_free_space(fs_info, leaf) -
                             sizeof(struct btrfs_item) - csum_size;
                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                WARN_ON(tmp < 1);

                extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff,
                           MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

                diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
                diff = min(free_space, diff);
                diff /= csum_size;
                diff *= csum_size;

                btrfs_extend_item(fs_info, path, diff);
                ret = 0;
                goto csum;
        }

insert:
        btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
                u64 tmp;

                tmp = sums->len - total_bytes;
                tmp >>= fs_info->sb->s_blocksize_bits;
                tmp = min(tmp, (next_offset - file_key.offset) >>
                               fs_info->sb->s_blocksize_bits);

                tmp = max_t(u64, 1, tmp);
                tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
                ins_size = csum_size * tmp;
        } else {
                ins_size = csum_size;
        }
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      ins_size);
        path->leave_spinning = 0;
        if (ret < 0)
                goto fail_unlock;
        if (WARN_ON(ret != 0))
                goto fail_unlock;
        leaf = path->nodes[0];
csum:
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item_end = (struct btrfs_csum_item *)((unsigned char *)item +
                                   btrfs_item_size_nr(leaf, path->slots[0]));
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
found:
        ins_size = (u32)(sums->len - total_bytes) >>
                   fs_info->sb->s_blocksize_bits;
        ins_size *= csum_size;
        ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
                              ins_size);
        write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
                            ins_size);

        ins_size /= csum_size;
        total_bytes += ins_size * fs_info->sectorsize;
        index += ins_size;

        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
                btrfs_release_path(path);
                cond_resched();
                goto again;
        }
out:
        btrfs_free_path(path);
        return ret;

fail_unlock:
        goto out;
}
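
/*
 * Translate the file extent item @fi referenced by @path into the extent
 * map @em, filling in the start, length, block and compression fields.
 */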
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
                                     const struct btrfs_path *path,
                                     struct btrfs_file_extent_item *fi,
                                     const bool new_inline,
                                     struct extent_map *em)
{
        struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
        struct btrfs_root *root = inode->root;
        struct extent_buffer *leaf = path->nodes[0];
        const int slot = path->slots[0];
        struct btrfs_key key;
        u64 extent_start, extent_end;
        u64 bytenr;
        u8 type = btrfs_file_extent_type(leaf, fi);
        int compress_type = btrfs_file_extent_compression(leaf, fi);

        em->bdev = fs_info->fs_devices->latest_bdev;
        btrfs_item_key_to_cpu(leaf, &key, slot);
        extent_start = key.offset;

        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                extent_end = extent_start +
                        btrfs_file_extent_num_bytes(leaf, fi);
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_inline_len(leaf, slot, fi);
                extent_end = ALIGN(extent_start + size,
                                   fs_info->sectorsize);
        }

        em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                em->start = extent_start;
                em->len = extent_end - extent_start;
                em->orig_start = extent_start -
                        btrfs_file_extent_offset(leaf, fi);
                em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                if (bytenr == 0) {
                        em->block_start = EXTENT_MAP_HOLE;
                        return;
                }
                if (compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                        em->block_start = bytenr;
                        em->block_len = em->orig_block_len;
                } else {
                        bytenr += btrfs_file_extent_offset(leaf, fi);
                        em->block_start = bytenr;
                        em->block_len = em->len;
                        if (type == BTRFS_FILE_EXTENT_PREALLOC)
                                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                }
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                em->block_start = EXTENT_MAP_INLINE;
                em->start = extent_start;
                em->len = extent_end - extent_start;
                /*
                 * Initialize orig_start and block_len with the same values
                 * as in inode.c:btrfs_get_extent().
                 */
                em->orig_start = EXTENT_MAP_HOLE;
                em->block_len = (u64)-1;
                if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                }
        } else {
                btrfs_err(fs_info,
                          "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
                          type, btrfs_ino(inode), extent_start,
                          root->root_key.objectid);
        }
}