fs/btrfs/file-item.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"
#include "compression.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(fs_info) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (fs_info)->sectorsize)

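/*
 * Insert a regular file extent item for @objectid at file offset @pos,
 * describing the on-disk extent [@disk_offset, @disk_offset + @disk_num_bytes).
 */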
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}

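/*
 * Find the csum item covering @bytenr and return a pointer to the checksum
 * slot for that exact block.  Returns ERR_PTR(-EFBIG) if the item ends right
 * where @bytenr's checksum would go, or ERR_PTR(-ENOENT) if no item covers it.
 */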
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
			      fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}

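/*
 * Position @path at the file extent item of @objectid at file offset @offset.
 * @mod != 0 means the caller intends to modify the item (the path is COWed),
 * and @mod < 0 additionally tells the search that the item will be deleted.
 */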
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}

static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}

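/*
 * Look up the data checksums for every sector covered by @bio, first from any
 * in-flight ordered extents and then from the csum tree, storing them either
 * in @dst or in the btrfs_io_bio's inline/allocated csum array.
 */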
static blk_status_t __btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct bio_vec bvec;
	struct bvec_iter iter;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u64 page_bytes_left;
	u32 diff;
	int nblocks;
	int count = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return BLK_STS_RESOURCE;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return BLK_STS_RESOURCE;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_SIZE * 8)
		path->reada = READA_FORWARD;

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(BTRFS_I(inode))) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;

	bio_for_each_segment(bvec, bio, iter) {
		page_bytes_left = bvec.bv_len;
		if (count)
			goto next;

		if (!dio)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + fs_info->sectorsize - 1,
						EXTENT_NODATASUM);
				} else {
					btrfs_info_rl(fs_info,
						"no csum found for inode %llu start %llu",
						btrfs_ino(BTRFS_I(inode)), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				fs_info->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / fs_info->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
next:
		while (count > 0) {
			count--;
			disk_bytenr += fs_info->sectorsize;
			offset += fs_info->sectorsize;
			page_bytes_left -= fs_info->sectorsize;
			if (!page_bytes_left)
				break; /* move to next bio */
		}
	}

	WARN_ON_ONCE(count);
	btrfs_free_path(path);
	return 0;
}

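/*
 * Wrappers around __btrfs_lookup_bio_sums(): the plain variant derives file
 * offsets from the bio's pages, while the _dio variant is handed the logical
 * file offset by the direct I/O code.
 */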
blk_status_t btrfs_lookup_bio_sums(struct inode *inode, struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(inode, bio, 0, dst, 0);
}

blk_status_t btrfs_lookup_bio_sums_dio(struct inode *inode, struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(inode, bio, offset, NULL, 1);
}

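/*
 * Collect all checksums stored for the byte range [start, end] of @root into
 * btrfs_ordered_sum entries appended to @list.
 */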
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
	       IS_ALIGNED(end + 1, fs_info->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * fs_info->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(fs_info));
			sums = kzalloc(btrfs_ordered_sum_size(fs_info, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				 fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += fs_info->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}

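/*
 * Compute the data checksum for every sector of @bio and attach the results,
 * as btrfs_ordered_sum entries, to the ordered extent(s) covering the range.
 */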
blk_status_t btrfs_csum_one_bio(struct inode *inode, struct bio *bio,
		       u64 file_start, int contig)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered = NULL;
	char *data;
	struct bvec_iter iter;
	struct bio_vec bvec;
	int index;
	int nr_sectors;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	int i;
	u64 offset;

	sums = kzalloc(btrfs_ordered_sum_size(fs_info, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return BLK_STS_RESOURCE;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = 0; /* shut up gcc */

	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	bio_for_each_segment(bvec, bio, iter) {
		if (!contig)
			offset = page_offset(bvec.bv_page) + bvec.bv_offset;

		if (!ordered) {
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
		}

		data = kmap_atomic(bvec.bv_page);

		nr_sectors = BTRFS_BYTES_TO_BLKS(fs_info,
						 bvec.bv_len + fs_info->sectorsize
						 - 1);

		for (i = 0; i < nr_sectors; i++) {
			if (offset >= ordered->file_offset + ordered->len ||
			    offset < ordered->file_offset) {
				unsigned long bytes_left;

				kunmap_atomic(data);
				sums->len = this_sum_bytes;
				this_sum_bytes = 0;
				btrfs_add_ordered_sum(inode, ordered, sums);
				btrfs_put_ordered_extent(ordered);

				bytes_left = bio->bi_iter.bi_size - total_bytes;

				sums = kzalloc(btrfs_ordered_sum_size(fs_info, bytes_left),
					       GFP_NOFS);
				BUG_ON(!sums); /* -ENOMEM */
				sums->len = bytes_left;
				ordered = btrfs_lookup_ordered_extent(inode,
								      offset);
				ASSERT(ordered); /* Logic error */
				sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9)
					+ total_bytes;
				index = 0;

				data = kmap_atomic(bvec.bv_page);
			}

			sums->sums[index] = ~(u32)0;
			sums->sums[index]
				= btrfs_csum_data(data + bvec.bv_offset
						+ (i * fs_info->sectorsize),
						sums->sums[index],
						fs_info->sectorsize);
			btrfs_csum_final(sums->sums[index],
					(char *)(sums->sums + index));
			index++;
			offset += fs_info->sectorsize;
			this_sum_bytes += fs_info->sectorsize;
			total_bytes += fs_info->sectorsize;
		}

		kunmap_atomic(data);
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_fs_info *fs_info,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 *         [ bytenr - len ]
		 *         [   ]
		 *   [csum     ]
		 *   A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(fs_info, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 *         [ bytenr - len ]
		 *                 [ ]
		 *                 [csum     ]
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(fs_info, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_fs_info *fs_info, u64 bytenr, u64 len)
{
	struct btrfs_root *root = fs_info->csum_root;
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			int del_nr = 1;

			/*
			 * Check how many csum items preceding this one in this
			 * leaf correspond to our range and then delete them all
			 * at once.
			 */
			if (key.offset > bytenr && path->slots[0] > 0) {
				int slot = path->slots[0] - 1;

				while (slot >= 0) {
					struct btrfs_key pk;

					btrfs_item_key_to_cpu(leaf, &pk, slot);
					if (pk.offset < bytenr ||
					    pk.type != BTRFS_EXTENT_CSUM_KEY ||
					    pk.objectid !=
					    BTRFS_EXTENT_CSUM_OBJECTID)
						break;
					path->slots[0] = slot;
					del_nr++;
					key.offset = pk.offset;
					slot--;
				}
			}
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 *        [ bytenr - len ]
			 *     [csum              ]
			 *
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memzero_extent_buffer(leaf, item_offset + offset,
					      shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(fs_info, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}

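/*
 * Write the checksums in @sums into the csum tree, extending an existing csum
 * item or inserting new ones as needed, and looping until every checksum in
 * @sums has been stored.
 */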
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(fs_info, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0) {
				goto out;
			} else if (ret > 0) {
				found_next = 1;
				goto insert;
			}
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(fs_info, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(fs_info, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(fs_info, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff,
			   MAX_CSUM_ITEMS(fs_info, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(fs_info, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 fs_info->sb->s_blocksize_bits);

		tmp = max_t(u64, 1, tmp);
		tmp = min_t(u64, tmp, MAX_CSUM_ITEMS(fs_info, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * fs_info->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}

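/*
 * Translate an on-disk file extent item (regular, prealloc or inline) into an
 * in-memory extent_map, filling in the block ranges and compression state.
 */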
void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_fs_info *fs_info = btrfs_sb(inode->vfs_inode.i_sb);
	struct btrfs_root *root = inode->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_ram_bytes(leaf, fi);
		extent_end = ALIGN(extent_start + size,
				   fs_info->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, "
			  "root %llu", type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}